1//===- SemaChecking.cpp - Extra Semantic Checking -------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements extra semantic analysis beyond what is enforced
10// by the C type system.
11//
12//===----------------------------------------------------------------------===//
13
14#include "clang/AST/APValue.h"
15#include "clang/AST/ASTContext.h"
16#include "clang/AST/Attr.h"
17#include "clang/AST/AttrIterator.h"
18#include "clang/AST/CharUnits.h"
19#include "clang/AST/Decl.h"
20#include "clang/AST/DeclBase.h"
21#include "clang/AST/DeclCXX.h"
22#include "clang/AST/DeclObjC.h"
23#include "clang/AST/DeclarationName.h"
24#include "clang/AST/EvaluatedExprVisitor.h"
25#include "clang/AST/Expr.h"
26#include "clang/AST/ExprCXX.h"
27#include "clang/AST/ExprObjC.h"
28#include "clang/AST/ExprOpenMP.h"
29#include "clang/AST/FormatString.h"
30#include "clang/AST/NSAPI.h"
31#include "clang/AST/NonTrivialTypeVisitor.h"
32#include "clang/AST/OperationKinds.h"
33#include "clang/AST/Stmt.h"
34#include "clang/AST/TemplateBase.h"
35#include "clang/AST/Type.h"
36#include "clang/AST/TypeLoc.h"
37#include "clang/AST/UnresolvedSet.h"
38#include "clang/Basic/AddressSpaces.h"
39#include "clang/Basic/CharInfo.h"
40#include "clang/Basic/Diagnostic.h"
41#include "clang/Basic/IdentifierTable.h"
42#include "clang/Basic/LLVM.h"
43#include "clang/Basic/LangOptions.h"
44#include "clang/Basic/OpenCLOptions.h"
45#include "clang/Basic/OperatorKinds.h"
46#include "clang/Basic/PartialDiagnostic.h"
47#include "clang/Basic/SourceLocation.h"
48#include "clang/Basic/SourceManager.h"
49#include "clang/Basic/Specifiers.h"
50#include "clang/Basic/SyncScope.h"
51#include "clang/Basic/TargetBuiltins.h"
52#include "clang/Basic/TargetCXXABI.h"
53#include "clang/Basic/TargetInfo.h"
54#include "clang/Basic/TypeTraits.h"
55#include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
56#include "clang/Sema/Initialization.h"
57#include "clang/Sema/Lookup.h"
58#include "clang/Sema/Ownership.h"
59#include "clang/Sema/Scope.h"
60#include "clang/Sema/ScopeInfo.h"
61#include "clang/Sema/Sema.h"
62#include "clang/Sema/SemaInternal.h"
63#include "llvm/ADT/APFloat.h"
64#include "llvm/ADT/APInt.h"
65#include "llvm/ADT/APSInt.h"
66#include "llvm/ADT/ArrayRef.h"
67#include "llvm/ADT/DenseMap.h"
68#include "llvm/ADT/FoldingSet.h"
69#include "llvm/ADT/None.h"
70#include "llvm/ADT/Optional.h"
71#include "llvm/ADT/STLExtras.h"
72#include "llvm/ADT/SmallBitVector.h"
73#include "llvm/ADT/SmallPtrSet.h"
74#include "llvm/ADT/SmallString.h"
75#include "llvm/ADT/SmallVector.h"
76#include "llvm/ADT/StringRef.h"
77#include "llvm/ADT/StringSwitch.h"
78#include "llvm/ADT/Triple.h"
79#include "llvm/Support/AtomicOrdering.h"
80#include "llvm/Support/Casting.h"
81#include "llvm/Support/Compiler.h"
82#include "llvm/Support/ConvertUTF.h"
83#include "llvm/Support/ErrorHandling.h"
84#include "llvm/Support/Format.h"
85#include "llvm/Support/Locale.h"
86#include "llvm/Support/MathExtras.h"
87#include "llvm/Support/raw_ostream.h"
88#include <algorithm>
89#include <cassert>
90#include <cstddef>
91#include <cstdint>
92#include <functional>
93#include <limits>
94#include <string>
95#include <tuple>
96#include <utility>
97
98using namespace clang;
99using namespace sema;
100
101SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL,
102 unsigned ByteNo) const {
103 return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts,
104 Context.getTargetInfo());
105}
106
107/// Checks that a call expression's argument count is the desired number.
108/// This is useful when doing custom type-checking. Returns true on error.
109static bool checkArgCount(Sema &S, CallExpr *call, unsigned desiredArgCount) {
110 unsigned argCount = call->getNumArgs();
111 if (argCount == desiredArgCount) return false;
112
113 if (argCount < desiredArgCount)
114 return S.Diag(call->getEndLoc(), diag::err_typecheck_call_too_few_args)
115 << 0 /*function call*/ << desiredArgCount << argCount
116 << call->getSourceRange();
117
118 // Highlight all the excess arguments.
119 SourceRange range(call->getArg(desiredArgCount)->getBeginLoc(),
120 call->getArg(argCount - 1)->getEndLoc());
121
122 return S.Diag(range.getBegin(), diag::err_typecheck_call_too_many_args)
123 << 0 /*function call*/ << desiredArgCount << argCount
124 << call->getArg(1)->getSourceRange();
125}
126
127/// Check that the first argument to __builtin_annotation is an integer
128/// and the second argument is a non-wide string literal.
129static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) {
130 if (checkArgCount(S, TheCall, 2))
131 return true;
132
133 // First argument should be an integer.
134 Expr *ValArg = TheCall->getArg(0);
135 QualType Ty = ValArg->getType();
136 if (!Ty->isIntegerType()) {
137 S.Diag(ValArg->getBeginLoc(), diag::err_builtin_annotation_first_arg)
138 << ValArg->getSourceRange();
139 return true;
140 }
141
142 // Second argument should be a constant string.
143 Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts();
144 StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg);
145 if (!Literal || !Literal->isAscii()) {
146 S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg)
147 << StrArg->getSourceRange();
148 return true;
149 }
150
151 TheCall->setType(Ty);
152 return false;
153}
154
155static bool SemaBuiltinCHERICapCreate(Sema &S, CallExpr *TheCall) {
156 if (checkArgCount(S, TheCall, 3))
157 return true;
158
159 QualType FnType = TheCall->getArg(2)->getType();
160 auto FnAttrType = FnType->getAs<AttributedType>();
161
162 // FIXME: Proper error
163 if (FnAttrType->getAttrKind() != attr::CHERICCallee) {
164 fprintf(stderr, "Argument must be a cheri_ccallee thingy\n");
165 return true;
166 }
167 // FIXME: Typecheck args 0 and 1
168 ASTContext &C = S.Context;
169 // FIXME: Error on null
170 auto BaseFnTy = cast<FunctionProtoType>(FnAttrType->getModifiedType());
171 auto ReturnFnTy = C.adjustFunctionType(BaseFnTy,
172 BaseFnTy->getExtInfo().withCallingConv(CC_CHERICCallback));
173 auto ReturnTy = C.getPointerType(QualType(ReturnFnTy, 0), ASTContext::PIK_Default);
174
175 TheCall->setType(ReturnTy);
176 return false;
177}
178
179
180static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) {
181 // We need at least one argument.
182 if (TheCall->getNumArgs() < 1) {
183 S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
184 << 0 << 1 << TheCall->getNumArgs()
185 << TheCall->getCallee()->getSourceRange();
186 return true;
187 }
188
189 // All arguments should be wide string literals.
190 for (Expr *Arg : TheCall->arguments()) {
191 auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
192 if (!Literal || !Literal->isWide()) {
193 S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str)
194 << Arg->getSourceRange();
195 return true;
196 }
197 }
198
199 return false;
200}
201
202/// Check that the argument to __builtin_addressof is a glvalue, and set the
203/// result type to the corresponding pointer type.
204static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
205 if (checkArgCount(S, TheCall, 1))
206 return true;
207
208 ExprResult Arg(TheCall->getArg(0));
209 QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getBeginLoc());
210 if (ResultType.isNull())
211 return true;
212
213 TheCall->setArg(0, Arg.get());
214 TheCall->setType(ResultType);
215 return false;
216}
217
218static bool SemaBuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID,
219 bool PowerOfTwo) {
220 if (checkArgCount(S, TheCall, 2))
221 return true;
222
223 clang::Expr *Source = TheCall->getArg(0);
224 clang::Expr *AlignOp = TheCall->getArg(1);
225 bool IsBooleanAlignBuiltin = ID == Builtin::BI__builtin_is_aligned ||
226 ID == Builtin::BI__builtin_is_p2aligned;
227
228 auto IsValidIntegerType = [](QualType Ty) {
229 return Ty->isIntegerType() && !Ty->isEnumeralType() && !Ty->isBooleanType();
230 };
231 if (!IsValidIntegerType(AlignOp->getType())) {
232 S.Diag(AlignOp->getExprLoc(), diag::err_typecheck_expect_int)
233 << AlignOp->getType();
234 return true;
235 }
236
237 QualType SrcTy = Source->getType();
238 // Should also be able to use it with arrays (but not functions!)
239 bool IsArrayToPointerDecay =
240 SrcTy->canDecayToPointerType() && SrcTy->isArrayType();
241 if (!SrcTy->isPointerType() && !IsArrayToPointerDecay &&
242 !IsValidIntegerType(SrcTy)) {
243 // TODO: this is not quite the right error message since we don't allow
244 // floating point types, or member pointers
245 S.Diag(AlignOp->getExprLoc(), diag::err_typecheck_expect_scalar_operand)
246 << SrcTy;
247 return true;
248 }
249 // err_argument_invalid_range
250 // TODO: allow zero as an always true result?
251 Expr::EvalResult AlignResult;
252 unsigned MaxAlignmentBits = S.Context.getIntRange(SrcTy) - 1;
253 // Can't check validity of alignment if it is type dependent
254 if (!AlignOp->isInstantiationDependent() && AlignOp->EvaluateAsInt(AlignResult, S.Context, Expr::SE_AllowSideEffects)) {
255 llvm::APSInt AlignValue = AlignResult.Val.getInt();
256 if (PowerOfTwo) {
257 if (AlignValue == 0) {
258 // aligning to 2^0 is always true/a noop -> add the tautological warning
259 S.Diag(AlignOp->getExprLoc(), diag::warn_alignment_builtin_useless)
260 << IsBooleanAlignBuiltin;
261 } else if (AlignValue < 0 || AlignValue > MaxAlignmentBits) {
262 S.Diag(AlignOp->getExprLoc(),
263 diag::err_alignment_power_of_two_out_of_range)
264 << AlignValue.toString(10) << 0 << MaxAlignmentBits;
265 }
266 } else {
267 llvm::APSInt MaxValue(
268 llvm::APInt::getOneBitSet(MaxAlignmentBits + 1, MaxAlignmentBits));
269 if (AlignValue < 1) {
270 S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_small) << 1;
271 return true;
272 } else if (llvm::APSInt::compareValues(AlignValue, MaxValue) > 0) {
273 S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_big)
274 << MaxValue.toString(10);
275 return true;
276 } else if (AlignValue == 1) {
277 S.Diag(AlignOp->getExprLoc(), diag::warn_alignment_builtin_useless)
278 << IsBooleanAlignBuiltin;
279 } else if (!AlignValue.isPowerOf2()) {
280 S.Diag(AlignOp->getExprLoc(), diag::err_alignment_not_power_of_two);
281 return true;
282 }
283 }
284 }
285
286 TheCall->setArg(0, Source);
287 TheCall->setArg(1, AlignOp);
288 QualType RetTy = Source->getType();
289 if (IsArrayToPointerDecay)
290 RetTy = S.Context.getDecayedType(RetTy);
291 // __builtin_is_aligned() returns bool instead of the same type as Arg1
292 TheCall->setType(IsBooleanAlignBuiltin ? S.Context.BoolTy : RetTy);
293 return false;
294}
295
296static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall) {
297 if (checkArgCount(S, TheCall, 3))
298 return true;
299
300 // First two arguments should be integers.
301 for (unsigned I = 0; I < 2; ++I) {
302 ExprResult Arg = TheCall->getArg(I);
303 QualType Ty = Arg.get()->getType();
304 if (!Ty->isIntegerType()) {
305 S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int)
306 << Ty << Arg.get()->getSourceRange();
307 return true;
308 }
309 InitializedEntity Entity = InitializedEntity::InitializeParameter(
310 S.getASTContext(), Ty, /*consume*/ false);
311 Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
312 if (Arg.isInvalid())
313 return true;
314 TheCall->setArg(I, Arg.get());
315 }
316
317 // Third argument should be a pointer to a non-const integer.
318 // IRGen correctly handles volatile, restrict, and address spaces, and
319 // the other qualifiers aren't possible.
320 {
321 ExprResult Arg = TheCall->getArg(2);
322 QualType Ty = Arg.get()->getType();
323 const auto *PtrTy = Ty->getAs<PointerType>();
324 if (!(PtrTy && PtrTy->getPointeeType()->isIntegerType() &&
325 !PtrTy->getPointeeType().isConstQualified())) {
326 S.Diag(Arg.get()->getBeginLoc(),
327 diag::err_overflow_builtin_must_be_ptr_int)
328 << Ty << Arg.get()->getSourceRange();
329 return true;
330 }
331 InitializedEntity Entity = InitializedEntity::InitializeParameter(
332 S.getASTContext(), Ty, /*consume*/ false);
333 Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
334 if (Arg.isInvalid())
335 return true;
336 TheCall->setArg(2, Arg.get());
337 }
338 return false;
339}
340
/// Check a call to __builtin_call_with_static_chain: argument 0 must be a
/// plain function call (not a block, builtin, or pseudo-destructor call) and
/// argument 1 a pointer to pass as the static chain. On success, the builtin
/// call node is rewritten in place to carry the wrapped call's result type
/// and value/object kind. Returns true on error.
static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
  if (checkArgCount(S, BuiltinCall, 2))
    return true;

  SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc();
  Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
  Expr *Call = BuiltinCall->getArg(0);
  Expr *Chain = BuiltinCall->getArg(1);

  // Must be an ordinary CallExpr; member/operator calls use other statement
  // classes, which this builtin does not support.
  if (Call->getStmtClass() != Stmt::CallExprClass) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call)
        << Call->getSourceRange();
    return true;
  }

  // Calls through block pointers are rejected...
  auto CE = cast<CallExpr>(Call);
  if (CE->getCallee()->getType()->isBlockPointerType()) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call)
        << Call->getSourceRange();
    return true;
  }

  // ...as are calls to builtin functions...
  const Decl *TargetDecl = CE->getCalleeDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
    if (FD->getBuiltinID()) {
      S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call)
          << Call->getSourceRange();
      return true;
    }

  // ...and pseudo-destructor calls.
  if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call)
        << Call->getSourceRange();
    return true;
  }

  // The chain argument undergoes the usual unary conversions and must end
  // up with pointer type.
  ExprResult ChainResult = S.UsualUnaryConversions(Chain);
  if (ChainResult.isInvalid())
    return true;
  if (!ChainResult.get()->getType()->isPointerType()) {
    S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer)
        << Chain->getSourceRange();
    return true;
  }

  // Give the builtin callee a synthesized function-pointer type taking
  // (return type, chain pointer) so CodeGen sees a well-typed call.
  QualType ReturnTy = CE->getCallReturnType(S.Context);
  QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() };
  QualType BuiltinTy = S.Context.getFunctionType(
      ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo());
  QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy);

  Builtin =
      S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get();

  // Propagate the wrapped call's result type and kinds onto the builtin
  // call, and store the converted chain expression back.
  BuiltinCall->setType(CE->getType());
  BuiltinCall->setValueKind(CE->getValueKind());
  BuiltinCall->setObjectKind(CE->getObjectKind());
  BuiltinCall->setCallee(Builtin);
  BuiltinCall->setArg(1, ChainResult.get());

  return false;
}
403
/// Check a call to BuiltinID for buffer overflows. If BuiltinID is a
/// __builtin_*_chk function, then use the object size argument specified in the
/// source. Otherwise, infer the object size using __builtin_object_size.
void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
                                               CallExpr *TheCall) {
  // FIXME: There are some more useful checks we could be doing here:
  //  - Analyze the format string of sprintf to see how much of buffer is used.
  //  - Evaluate strlen of strcpy arguments, use as object size.

  // Nothing can be evaluated for dependent calls; this runs again after
  // instantiation.
  if (TheCall->isValueDependent() || TheCall->isTypeDependent())
    return;

  // ConsiderWrappers=true also matches library functions that wrap builtins.
  unsigned BuiltinID = FD->getBuiltinID(/*ConsiderWrappers=*/true);
  if (!BuiltinID)
    return;

  // SizeIndex: index of the argument giving the number of bytes the call
  // will write. ObjectIndex: index of the argument from which the
  // destination object's size is derived (an explicit size for the _chk
  // variants, the destination pointer otherwise).
  unsigned DiagID = 0;
  bool IsChkVariant = false;
  unsigned SizeIndex, ObjectIndex;
  switch (BuiltinID) {
  default:
    return;
  case Builtin::BI__builtin___memcpy_chk:
  case Builtin::BI__builtin___memmove_chk:
  case Builtin::BI__builtin___memset_chk:
  case Builtin::BI__builtin___strlcat_chk:
  case Builtin::BI__builtin___strlcpy_chk:
  case Builtin::BI__builtin___strncat_chk:
  case Builtin::BI__builtin___strncpy_chk:
  case Builtin::BI__builtin___stpncpy_chk:
  case Builtin::BI__builtin___memccpy_chk: {
    // For these, the size and object-size operands are the last two args.
    DiagID = diag::warn_builtin_chk_overflow;
    IsChkVariant = true;
    SizeIndex = TheCall->getNumArgs() - 2;
    ObjectIndex = TheCall->getNumArgs() - 1;
    break;
  }

  case Builtin::BI__builtin___snprintf_chk:
  case Builtin::BI__builtin___vsnprintf_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    IsChkVariant = true;
    SizeIndex = 1;
    ObjectIndex = 3;
    break;
  }

  case Builtin::BIstrncat:
  case Builtin::BI__builtin_strncat:
  case Builtin::BIstrncpy:
  case Builtin::BI__builtin_strncpy:
  case Builtin::BIstpncpy:
  case Builtin::BI__builtin_stpncpy: {
    // Whether these functions overflow depends on the runtime strlen of the
    // string, not just the buffer size, so emitting the "always overflow"
    // diagnostic isn't quite right. We should still diagnose passing a buffer
    // size larger than the destination buffer though; this is a runtime abort
    // in _FORTIFY_SOURCE mode, and is quite suspicious otherwise.
    DiagID = diag::warn_fortify_source_size_mismatch;
    SizeIndex = TheCall->getNumArgs() - 1;
    ObjectIndex = 0;
    break;
  }

  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memcpy:
  case Builtin::BImemmove:
  case Builtin::BI__builtin_memmove:
  case Builtin::BImemset:
  case Builtin::BI__builtin_memset: {
    DiagID = diag::warn_fortify_source_overflow;
    SizeIndex = TheCall->getNumArgs() - 1;
    ObjectIndex = 0;
    break;
  }
  case Builtin::BIsnprintf:
  case Builtin::BI__builtin_snprintf:
  case Builtin::BIvsnprintf:
  case Builtin::BI__builtin_vsnprintf: {
    DiagID = diag::warn_fortify_source_size_mismatch;
    SizeIndex = 1;
    ObjectIndex = 0;
    break;
  }
  }

  llvm::APSInt ObjectSize;
  // For __builtin___*_chk, the object size is explicitly provided by the caller
  // (usually using __builtin_object_size). Use that value to check this call.
  if (IsChkVariant) {
    Expr::EvalResult Result;
    Expr *SizeArg = TheCall->getArg(ObjectIndex);
    // If the size is not a compile-time constant, there is nothing to check.
    if (!SizeArg->EvaluateAsInt(Result, getASTContext()))
      return;
    ObjectSize = Result.Val.getInt();

    // Otherwise, try to evaluate an imaginary call to __builtin_object_size.
  } else {
    // If the parameter has a pass_object_size attribute, then we should use its
    // (potentially) more strict checking mode. Otherwise, conservatively assume
    // type 0.
    int BOSType = 0;
    if (const auto *POS =
            FD->getParamDecl(ObjectIndex)->getAttr<PassObjectSizeAttr>())
      BOSType = POS->getType();

    Expr *ObjArg = TheCall->getArg(ObjectIndex);
    uint64_t Result;
    if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType))
      return;
    // Get the object size in the target's size_t width.
    const TargetInfo &TI = getASTContext().getTargetInfo();
    unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType());
    ObjectSize = llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth);
  }

  // Evaluate the number of bytes of the object that this call will use.
  Expr::EvalResult Result;
  Expr *UsedSizeArg = TheCall->getArg(SizeIndex);
  if (!UsedSizeArg->EvaluateAsInt(Result, getASTContext()))
    return;
  llvm::APSInt UsedSize = Result.Val.getInt();

  // Only warn when the call provably writes past the end of the object.
  if (UsedSize.ule(ObjectSize))
    return;

  StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID);
  // Skim off the details of whichever builtin was called to produce a better
  // diagnostic, as it's unlikley that the user wrote the __builtin explicitly.
  if (IsChkVariant) {
    FunctionName = FunctionName.drop_front(std::strlen("__builtin___"));
    FunctionName = FunctionName.drop_back(std::strlen("_chk"));
  } else if (FunctionName.startswith("__builtin_")) {
    FunctionName = FunctionName.drop_front(std::strlen("__builtin_"));
  }

  // DiagRuntimeBehavior suppresses the warning in provably-unreachable code.
  DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
                      PDiag(DiagID)
                          << FunctionName << ObjectSize.toString(/*Radix=*/10)
                          << UsedSize.toString(/*Radix=*/10));
}
545
546static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
547 Scope::ScopeFlags NeededScopeFlags,
548 unsigned DiagID) {
549 // Scopes aren't available during instantiation. Fortunately, builtin
550 // functions cannot be template args so they cannot be formed through template
551 // instantiation. Therefore checking once during the parse is sufficient.
552 if (SemaRef.inTemplateInstantiation())
553 return false;
554
555 Scope *S = SemaRef.getCurScope();
556 while (S && !S->isSEHExceptScope())
557 S = S->getParent();
558 if (!S || !(S->getFlags() & NeededScopeFlags)) {
559 auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
560 SemaRef.Diag(TheCall->getExprLoc(), DiagID)
561 << DRE->getDecl()->getIdentifier();
562 return true;
563 }
564
565 return false;
566}
567
568static inline bool isBlockPointer(Expr *Arg) {
569 return Arg->getType()->isBlockPointerType();
570}
571
572/// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local
573/// void*, which is a requirement of device side enqueue.
574static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) {
575 const BlockPointerType *BPT =
576 cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
577 ArrayRef<QualType> Params =
578 BPT->getPointeeType()->getAs<FunctionProtoType>()->getParamTypes();
579 unsigned ArgCounter = 0;
580 bool IllegalParams = false;
581 // Iterate through the block parameters until either one is found that is not
582 // a local void*, or the block is valid.
583 for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end();
584 I != E; ++I, ++ArgCounter) {
585 if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() ||
586 (*I)->getPointeeType().getQualifiers().getAddressSpace() !=
587 LangAS::opencl_local) {
588 // Get the location of the error. If a block literal has been passed
589 // (BlockExpr) then we can point straight to the offending argument,
590 // else we just point to the variable reference.
591 SourceLocation ErrorLoc;
592 if (isa<BlockExpr>(BlockArg)) {
593 BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl();
594 ErrorLoc = BD->getParamDecl(ArgCounter)->getBeginLoc();
595 } else if (isa<DeclRefExpr>(BlockArg)) {
596 ErrorLoc = cast<DeclRefExpr>(BlockArg)->getBeginLoc();
597 }
598 S.Diag(ErrorLoc,
599 diag::err_opencl_enqueue_kernel_blocks_non_local_void_args);
600 IllegalParams = true;
601 }
602 }
603
604 return IllegalParams;
605}
606
607static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) {
608 if (!S.getOpenCLOptions().isEnabled("cl_khr_subgroups")) {
609 S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension)
610 << 1 << Call->getDirectCallee() << "cl_khr_subgroups";
611 return true;
612 }
613 return false;
614}
615
616static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) {
617 if (checkArgCount(S, TheCall, 2))
618 return true;
619
620 if (checkOpenCLSubgroupExt(S, TheCall))
621 return true;
622
623 // First argument is an ndrange_t type.
624 Expr *NDRangeArg = TheCall->getArg(0);
625 if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
626 S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
627 << TheCall->getDirectCallee() << "'ndrange_t'";
628 return true;
629 }
630
631 Expr *BlockArg = TheCall->getArg(1);
632 if (!isBlockPointer(BlockArg)) {
633 S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
634 << TheCall->getDirectCallee() << "block";
635 return true;
636 }
637 return checkOpenCLBlockArgs(S, BlockArg);
638}
639
640/// OpenCL C v2.0, s6.13.17.6 - Check the argument to the
641/// get_kernel_work_group_size
642/// and get_kernel_preferred_work_group_size_multiple builtin functions.
643static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) {
644 if (checkArgCount(S, TheCall, 1))
645 return true;
646
647 Expr *BlockArg = TheCall->getArg(0);
648 if (!isBlockPointer(BlockArg)) {
649 S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
650 << TheCall->getDirectCallee() << "block";
651 return true;
652 }
653 return checkOpenCLBlockArgs(S, BlockArg);
654}
655
656/// Diagnose integer type and any valid implicit conversion to it.
657static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E,
658 const QualType &IntType);
659
660static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall,
661 unsigned Start, unsigned End) {
662 bool IllegalParams = false;
663 for (unsigned I = Start; I <= End; ++I)
664 IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I),
665 S.Context.getSizeType());
666 return IllegalParams;
667}
668
/// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all
/// 'local void*' parameter of passed block.
///
/// \param BlockArg      the block argument of the enqueue_kernel call.
/// \param NumNonVarArgs the number of fixed arguments that precede the
///                      variadic local-size arguments.
/// \return true if a semantic error has been found.
static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall,
                                           Expr *BlockArg,
                                           unsigned NumNonVarArgs) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  unsigned NumBlockParams =
      BPT->getPointeeType()->getAs<FunctionProtoType>()->getNumParams();
  unsigned TotalNumArgs = TheCall->getNumArgs();

  // For each argument passed to the block, a corresponding uint needs to
  // be passed to describe the size of the local memory.
  if (TotalNumArgs != NumBlockParams + NumNonVarArgs) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_opencl_enqueue_kernel_local_size_args);
    return true;
  }

  // Check that the sizes of the local memory are specified by integers.
  return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs,
                                         TotalNumArgs - 1);
}
692
693/// OpenCL C v2.0, s6.13.17 - Enqueue kernel function contains four different
694/// overload formats specified in Table 6.13.17.1.
695/// int enqueue_kernel(queue_t queue,
696/// kernel_enqueue_flags_t flags,
697/// const ndrange_t ndrange,
698/// void (^block)(void))
699/// int enqueue_kernel(queue_t queue,
700/// kernel_enqueue_flags_t flags,
701/// const ndrange_t ndrange,
702/// uint num_events_in_wait_list,
703/// clk_event_t *event_wait_list,
704/// clk_event_t *event_ret,
705/// void (^block)(void))
706/// int enqueue_kernel(queue_t queue,
707/// kernel_enqueue_flags_t flags,
708/// const ndrange_t ndrange,
709/// void (^block)(local void*, ...),
710/// uint size0, ...)
711/// int enqueue_kernel(queue_t queue,
712/// kernel_enqueue_flags_t flags,
713/// const ndrange_t ndrange,
714/// uint num_events_in_wait_list,
715/// clk_event_t *event_wait_list,
716/// clk_event_t *event_ret,
717/// void (^block)(local void*, ...),
718/// uint size0, ...)
719static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
720 unsigned NumArgs = TheCall->getNumArgs();
721
722 if (NumArgs < 4) {
723 S.Diag(TheCall->getBeginLoc(), diag::err_typecheck_call_too_few_args);
724 return true;
725 }
726
727 Expr *Arg0 = TheCall->getArg(0);
728 Expr *Arg1 = TheCall->getArg(1);
729 Expr *Arg2 = TheCall->getArg(2);
730 Expr *Arg3 = TheCall->getArg(3);
731
732 // First argument always needs to be a queue_t type.
733 if (!Arg0->getType()->isQueueT()) {
734 S.Diag(TheCall->getArg(0)->getBeginLoc(),
735 diag::err_opencl_builtin_expected_type)
736 << TheCall->getDirectCallee() << S.Context.OCLQueueTy;
737 return true;
738 }
739
740 // Second argument always needs to be a kernel_enqueue_flags_t enum value.
741 if (!Arg1->getType()->isIntegerType()) {
742 S.Diag(TheCall->getArg(1)->getBeginLoc(),
743 diag::err_opencl_builtin_expected_type)
744 << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)";
745 return true;
746 }
747
748 // Third argument is always an ndrange_t type.
749 if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
750 S.Diag(TheCall->getArg(2)->getBeginLoc(),
751 diag::err_opencl_builtin_expected_type)
752 << TheCall->getDirectCallee() << "'ndrange_t'";
753 return true;
754 }
755
756 // With four arguments, there is only one form that the function could be
757 // called in: no events and no variable arguments.
758 if (NumArgs == 4) {
759 // check that the last argument is the right block type.
760 if (!isBlockPointer(Arg3)) {
761 S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type)
762 << TheCall->getDirectCallee() << "block";
763 return true;
764 }
765 // we have a block type, check the prototype
766 const BlockPointerType *BPT =
767 cast<BlockPointerType>(Arg3->getType().getCanonicalType());
768 if (BPT->getPointeeType()->getAs<FunctionProtoType>()->getNumParams() > 0) {
769 S.Diag(Arg3->getBeginLoc(),
770 diag::err_opencl_enqueue_kernel_blocks_no_args);
771 return true;
772 }
773 return false;
774 }
775 // we can have block + varargs.
776 if (isBlockPointer(Arg3))
777 return (checkOpenCLBlockArgs(S, Arg3) ||
778 checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4));
779 // last two cases with either exactly 7 args or 7 args and varargs.
780 if (NumArgs >= 7) {
781 // check common block argument.
782 Expr *Arg6 = TheCall->getArg(6);
783 if (!isBlockPointer(Arg6)) {
784 S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type)
785 << TheCall->getDirectCallee() << "block";
786 return true;
787 }
788 if (checkOpenCLBlockArgs(S, Arg6))
789 return true;
790
791 // Forth argument has to be any integer type.
792 if (!Arg3->getType()->isIntegerType()) {
793 S.Diag(TheCall->getArg(3)->getBeginLoc(),
794 diag::err_opencl_builtin_expected_type)
795 << TheCall->getDirectCallee() << "integer";
796 return true;
797 }
798 // check remaining common arguments.
799 Expr *Arg4 = TheCall->getArg(4);
800 Expr *Arg5 = TheCall->getArg(5);
801
802 // Fifth argument is always passed as a pointer to clk_event_t.
803 if (!Arg4->isNullPointerConstant(S.Context,
804 Expr::NPC_ValueDependentIsNotNull) &&
805 !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) {
806 S.Diag(TheCall->getArg(4)->getBeginLoc(),
807 diag::err_opencl_builtin_expected_type)
808 << TheCall->getDirectCallee()
809 << S.Context.getPointerType(S.Context.OCLClkEventTy);
810 return true;
811 }
812
813 // Sixth argument is always passed as a pointer to clk_event_t.
814 if (!Arg5->isNullPointerConstant(S.Context,
815 Expr::NPC_ValueDependentIsNotNull) &&
816 !(Arg5->getType()->isPointerType() &&
817 Arg5->getType()->getPointeeType()->isClkEventT())) {
818 S.Diag(TheCall->getArg(5)->getBeginLoc(),
819 diag::err_opencl_builtin_expected_type)
820 << TheCall->getDirectCallee()
821 << S.Context.getPointerType(S.Context.OCLClkEventTy);
822 return true;
823 }
824
825 if (NumArgs == 7)
826 return false;
827
828 return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7);
829 }
830
831 // None of the specific case has been detected, give generic error
832 S.Diag(TheCall->getBeginLoc(),
833 diag::err_opencl_enqueue_kernel_incorrect_args);
834 return true;
835}
836
837/// Returns OpenCL access qual.
838static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) {
839 return D->getAttr<OpenCLAccessAttr>();
840}
841
/// Performs semantic analysis of the first (pipe) argument of a pipe builtin
/// call: it must have pipe type, and its access qualifier (read_only or
/// write_only, defaulting to read_only when unspecified, per OpenCL v2.0
/// s6.13.16) must match the direction of the builtin.
/// \return true if a semantic error has been found, false otherwise.
static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) {
  const Expr *Arg0 = Call->getArg(0);
  // First argument type should always be pipe.
  if (!Arg0->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Arg0->getSourceRange();
    return true;
  }
  // NOTE(review): this assumes the pipe argument is always a DeclRefExpr;
  // cast<> asserts otherwise -- confirm callers guarantee this.
  OpenCLAccessAttr *AccessQual =
      getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl());
  // Validates the access qualifier is compatible with the call.
  // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be
  // read_only and write_only, and assumed to be read_only if no qualifier is
  // specified.
  switch (Call->getDirectCallee()->getBuiltinID()) {
  // Read-direction builtins: the qualifier must be absent (implicitly
  // read_only) or explicitly read_only.
  case Builtin::BIread_pipe:
  case Builtin::BIreserve_read_pipe:
  case Builtin::BIcommit_read_pipe:
  case Builtin::BIwork_group_reserve_read_pipe:
  case Builtin::BIsub_group_reserve_read_pipe:
  case Builtin::BIwork_group_commit_read_pipe:
  case Builtin::BIsub_group_commit_read_pipe:
    if (!(!AccessQual || AccessQual->isReadOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "read_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  // Write-direction builtins: the qualifier must be explicitly write_only.
  case Builtin::BIwrite_pipe:
  case Builtin::BIreserve_write_pipe:
  case Builtin::BIcommit_write_pipe:
  case Builtin::BIwork_group_reserve_write_pipe:
  case Builtin::BIsub_group_reserve_write_pipe:
  case Builtin::BIwork_group_commit_write_pipe:
  case Builtin::BIsub_group_commit_write_pipe:
    if (!(AccessQual && AccessQual->isWriteOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "write_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  default:
    break;
  }
  return false;
}
891
892/// Returns true if pipe element type is different from the pointer.
static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) {
  const Expr *Arg0 = Call->getArg(0);
  const Expr *ArgIdx = Call->getArg(Idx);
  // Callers (e.g. SemaBuiltinRWPipe) run checkOpenCLPipeArg() first, so Arg0
  // is already known to have pipe type and the unchecked cast is safe.
  const PipeType *PipeTy = cast<PipeType>(Arg0->getType());
  const QualType EltTy = PipeTy->getElementType();
  const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>();
  // The Idx argument should be a pointer and the type of the pointer and
  // the type of pipe element should also be the same.
  // Comparison is done on the canonical pointee type so that sugar (typedefs
  // etc.) on the packet pointer does not cause a spurious mismatch.
  if (!ArgTy ||
      !S.Context.hasSameType(
          EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.getPointerType(EltTy)
        << ArgIdx->getType() << ArgIdx->getSourceRange();
    return true;
  }
  return false;
}
911
912// Performs semantic analysis for the read/write_pipe call.
913// \param S Reference to the semantic analyzer.
914// \param Call A pointer to the builtin call.
915// \return True if a semantic error has been found, false otherwise.
916static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) {
917 // OpenCL v2.0 s6.13.16.2 - The built-in read/write
918 // functions have two forms.
919 switch (Call->getNumArgs()) {
920 case 2:
921 if (checkOpenCLPipeArg(S, Call))
922 return true;
923 // The call with 2 arguments should be
924 // read/write_pipe(pipe T, T*).
925 // Check packet type T.
926 if (checkOpenCLPipePacketType(S, Call, 1))
927 return true;
928 break;
929
930 case 4: {
931 if (checkOpenCLPipeArg(S, Call))
932 return true;
933 // The call with 4 arguments should be
934 // read/write_pipe(pipe T, reserve_id_t, uint, T*).
935 // Check reserve_id_t.
936 if (!Call->getArg(1)->getType()->isReserveIDT()) {
937 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
938 << Call->getDirectCallee() << S.Context.OCLReserveIDTy
939 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
940 return true;
941 }
942
943 // Check the index.
944 const Expr *Arg2 = Call->getArg(2);
945 if (!Arg2->getType()->isIntegerType() &&
946 !Arg2->getType()->isUnsignedIntegerType()) {
947 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
948 << Call->getDirectCallee() << S.Context.UnsignedIntTy
949 << Arg2->getType() << Arg2->getSourceRange();
950 return true;
951 }
952
953 // Check packet type T.
954 if (checkOpenCLPipePacketType(S, Call, 3))
955 return true;
956 } break;
957 default:
958 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num)
959 << Call->getDirectCallee() << Call->getSourceRange();
960 return true;
961 }
962
963 return false;
964}
965
966// Performs a semantic analysis on the {work_group_/sub_group_
967// /_}reserve_{read/write}_pipe
968// \param S Reference to the semantic analyzer.
969// \param Call The call to the builtin function to be analyzed.
970// \return True if a semantic error was found, false otherwise.
971static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
972 if (checkArgCount(S, Call, 2))
973 return true;
974
975 if (checkOpenCLPipeArg(S, Call))
976 return true;
977
978 // Check the reserve size.
979 if (!Call->getArg(1)->getType()->isIntegerType() &&
980 !Call->getArg(1)->getType()->isUnsignedIntegerType()) {
981 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
982 << Call->getDirectCallee() << S.Context.UnsignedIntTy
983 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
984 return true;
985 }
986
987 // Since return type of reserve_read/write_pipe built-in function is
988 // reserve_id_t, which is not defined in the builtin def file , we used int
989 // as return type and need to override the return type of these functions.
990 Call->setType(S.Context.OCLReserveIDTy);
991
992 return false;
993}
994
995// Performs a semantic analysis on {work_group_/sub_group_
996// /_}commit_{read/write}_pipe
997// \param S Reference to the semantic analyzer.
998// \param Call The call to the builtin function to be analyzed.
999// \return True if a semantic error was found, false otherwise.
1000static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
1001 if (checkArgCount(S, Call, 2))
1002 return true;
1003
1004 if (checkOpenCLPipeArg(S, Call))
1005 return true;
1006
1007 // Check reserve_id_t.
1008 if (!Call->getArg(1)->getType()->isReserveIDT()) {
1009 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1010 << Call->getDirectCallee() << S.Context.OCLReserveIDTy
1011 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
1012 return true;
1013 }
1014
1015 return false;
1016}
1017
1018// Performs a semantic analysis on the call to built-in Pipe
1019// Query Functions.
1020// \param S Reference to the semantic analyzer.
1021// \param Call The call to the builtin function to be analyzed.
1022// \return True if a semantic error was found, false otherwise.
1023static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) {
1024 if (checkArgCount(S, Call, 1))
1025 return true;
1026
1027 if (!Call->getArg(0)->getType()->isPipeType()) {
1028 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
1029 << Call->getDirectCallee() << Call->getArg(0)->getSourceRange();
1030 return true;
1031 }
1032
1033 return false;
1034}
1035
1036// OpenCL v2.0 s6.13.9 - Address space qualifier functions.
1037// Performs semantic analysis for the to_global/local/private call.
1038// \param S Reference to the semantic analyzer.
1039// \param BuiltinID ID of the builtin function.
1040// \param Call A pointer to the builtin call.
1041// \return True if a semantic error has been found, false otherwise.
1042static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID,
1043 CallExpr *Call) {
1044 if (Call->getNumArgs() != 1) {
1045 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_arg_num)
1046 << Call->getDirectCallee() << Call->getSourceRange();
1047 return true;
1048 }
1049
1050 auto RT = Call->getArg(0)->getType();
1051 if (!RT->isPointerType() || RT->getPointeeType()
1052 .getAddressSpace() == LangAS::opencl_constant) {
1053 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg)
1054 << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange();
1055 return true;
1056 }
1057
1058 if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) {
1059 S.Diag(Call->getArg(0)->getBeginLoc(),
1060 diag::warn_opencl_generic_address_space_arg)
1061 << Call->getDirectCallee()->getNameInfo().getAsString()
1062 << Call->getArg(0)->getSourceRange();
1063 }
1064
1065 RT = RT->getPointeeType();
1066 auto Qual = RT.getQualifiers();
1067 switch (BuiltinID) {
1068 case Builtin::BIto_global:
1069 Qual.setAddressSpace(LangAS::opencl_global);
1070 break;
1071 case Builtin::BIto_local:
1072 Qual.setAddressSpace(LangAS::opencl_local);
1073 break;
1074 case Builtin::BIto_private:
1075 Qual.setAddressSpace(LangAS::opencl_private);
1076 break;
1077 default:
1078 llvm_unreachable("Invalid builtin function");
1079 }
1080 Call->setType(S.Context.getPointerType(S.Context.getQualifiedType(
1081 RT.getUnqualifiedType(), Qual)));
1082
1083 return false;
1084}
1085
static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return ExprError();

  // Compute __builtin_launder's parameter type from the argument.
  // The parameter type is:
  //  * The type of the argument if it's not an array or function type,
  //  Otherwise,
  //  * The decayed argument type.
  QualType ParamTy = [&]() {
    QualType ArgTy = TheCall->getArg(0)->getType();
    if (const ArrayType *Ty = ArgTy->getAsArrayTypeUnsafe())
      return S.Context.getPointerType(Ty->getElementType());
    if (ArgTy->isFunctionType()) {
      return S.Context.getPointerType(ArgTy);
    }
    return ArgTy;
  }();

  // The builtin's result has the same (decayed) type as its argument.
  TheCall->setType(ParamTy);

  // Classify invalid arguments; the returned value selects the wording of
  // the diagnostic below (0: non-pointer, 1: function pointer, 2: void
  // pointer).  No value means the argument is acceptable.  Note this is an
  // immediately-invoked lambda.
  auto DiagSelect = [&]() -> llvm::Optional<unsigned> {
    if (!ParamTy->isPointerType())
      return 0;
    if (ParamTy->isFunctionPointerType())
      return 1;
    if (ParamTy->isVoidPointerType())
      return 2;
    return llvm::Optional<unsigned>{};
  }();
  if (DiagSelect.hasValue()) {
    S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg)
        << DiagSelect.getValue() << TheCall->getSourceRange();
    return ExprError();
  }

  // We either have an incomplete class type, or we have a class template
  // whose instantiation has not been forced. Example:
  //
  //   template <class T> struct Foo { T value; };
  //   Foo<int> *p = nullptr;
  //   auto *d = __builtin_launder(p);
  if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(),
                            diag::err_incomplete_type))
    return ExprError();

  assert(ParamTy->getPointeeType()->isObjectType() &&
         "Unhandled non-object pointer case");

  // Convert the argument to the computed parameter type (e.g. performing
  // array-to-pointer decay), and store the converted expression back.
  InitializedEntity Entity =
      InitializedEntity::InitializeParameter(S.Context, ParamTy, false);
  ExprResult Arg =
      S.PerformCopyInitialization(Entity, SourceLocation(), TheCall->getArg(0));
  if (Arg.isInvalid())
    return ExprError();
  TheCall->setArg(0, Arg.get());

  return TheCall;
}
1145
1146// Emit an error and return true if the current architecture is not in the list
1147// of supported architectures.
1148static bool
1149CheckBuiltinTargetSupport(Sema &S, unsigned BuiltinID, CallExpr *TheCall,
1150 ArrayRef<llvm::Triple::ArchType> SupportedArchs) {
1151 llvm::Triple::ArchType CurArch =
1152 S.getASTContext().getTargetInfo().getTriple().getArch();
1153 if (llvm::is_contained(SupportedArchs, CurArch))
1154 return false;
1155 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
1156 << TheCall->getSourceRange();
1157 return true;
1158}
1159
/// Perform builtin-specific semantic checking on a call to the builtin
/// \p BuiltinID, after ordinary call checking has succeeded.  Many cases
/// delegate to a dedicated SemaBuiltin* helper; some also rewrite the
/// call's result type.  Returns the (possibly updated) call expression,
/// or an invalid ExprResult when a hard error was diagnosed.
ExprResult
Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
                               CallExpr *TheCall) {
  ExprResult TheCallResult(TheCall);

  // Find out if any arguments are required to be integer constant expressions.
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  Context.GetBuiltinType(BuiltinID, Error, &ICEArguments);
  if (Error != ASTContext::GE_None)
    ICEArguments = 0; // Don't diagnose previously diagnosed errors.

  // If any arguments are required to be ICE's, check and diagnose.
  for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) {
    // Skip arguments not required to be ICE's.
    if ((ICEArguments & (1 << ArgNo)) == 0) continue;

    llvm::APSInt Result;
    // NOTE(review): 'return true' here and in the setjmp/classify_type/
    // constant_p cases below constructs an invalid ExprResult -- presumably
    // equivalent to ExprError(); confirm against ActionResult's bool
    // constructor before unifying the style.
    if (SemaBuiltinConstantArg(TheCall, ArgNo, Result))
      return true;
    ICEArguments &= ~(1 << ArgNo);
  }

  switch (BuiltinID) {
  case Builtin::BI__builtin_return_address:
    TheCall->setType(Context.VoidPtrTy);
    break;
  case Builtin::BI__builtin___CFStringMakeConstantString:
    assert(TheCall->getNumArgs() == 1 &&
           "Wrong # arguments to builtin CFStringMakeConstantString");
    if (CheckObjCString(TheCall->getArg(0)))
      return ExprError();
    break;
  case Builtin::BI__builtin_ms_va_start:
  case Builtin::BI__builtin_stdarg_start:
  case Builtin::BI__builtin_va_start:
    if (SemaBuiltinVAStart(BuiltinID, TheCall))
      return ExprError();
    break;
  case Builtin::BI__va_start: {
    // The Microsoft __va_start builtin has an ARM-specific signature; pick
    // the checker based on the target architecture.
    switch (Context.getTargetInfo().getTriple().getArch()) {
    case llvm::Triple::aarch64:
    case llvm::Triple::arm:
    case llvm::Triple::thumb:
      if (SemaBuiltinVAStartARMMicrosoft(TheCall))
        return ExprError();
      break;
    default:
      if (SemaBuiltinVAStart(BuiltinID, TheCall))
        return ExprError();
      break;
    }
    break;
  }

  // The acquire, release, and no fence variants are ARM and AArch64 only.
  case Builtin::BI_interlockedbittestandset_acq:
  case Builtin::BI_interlockedbittestandset_rel:
  case Builtin::BI_interlockedbittestandset_nf:
  case Builtin::BI_interlockedbittestandreset_acq:
  case Builtin::BI_interlockedbittestandreset_rel:
  case Builtin::BI_interlockedbittestandreset_nf:
    if (CheckBuiltinTargetSupport(
            *this, BuiltinID, TheCall,
            {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64}))
      return ExprError();
    break;

  // The 64-bit bittest variants are x64, ARM, and AArch64 only.
  case Builtin::BI_bittest64:
  case Builtin::BI_bittestandcomplement64:
  case Builtin::BI_bittestandreset64:
  case Builtin::BI_bittestandset64:
  case Builtin::BI_interlockedbittestandreset64:
  case Builtin::BI_interlockedbittestandset64:
    if (CheckBuiltinTargetSupport(*this, BuiltinID, TheCall,
                                  {llvm::Triple::x86_64, llvm::Triple::arm,
                                   llvm::Triple::thumb, llvm::Triple::aarch64}))
      return ExprError();
    break;

  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered:
    if (SemaBuiltinUnorderedCompare(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_fpclassify:
    if (SemaBuiltinFPClassification(TheCall, 6))
      return ExprError();
    break;
  case Builtin::BI__builtin_isfinite:
  case Builtin::BI__builtin_isinf:
  case Builtin::BI__builtin_isinf_sign:
  case Builtin::BI__builtin_isnan:
  case Builtin::BI__builtin_isnormal:
  case Builtin::BI__builtin_signbit:
  case Builtin::BI__builtin_signbitf:
  case Builtin::BI__builtin_signbitl:
    if (SemaBuiltinFPClassification(TheCall, 1))
      return ExprError();
    break;
  case Builtin::BI__builtin_shufflevector:
    return SemaBuiltinShuffleVector(TheCall);
    // TheCall will be freed by the smart pointer here, but that's fine, since
    // SemaBuiltinShuffleVector guts it, but then doesn't release it.
  case Builtin::BI__builtin_prefetch:
    if (SemaBuiltinPrefetch(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_alloca_with_align:
    if (SemaBuiltinAllocaWithAlign(TheCall))
      return ExprError();
    break;
  case Builtin::BI__assume:
  case Builtin::BI__builtin_assume:
    if (SemaBuiltinAssume(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_assume_aligned:
  case Builtin::BI__builtin_assume_aligned_cap:
    if (SemaBuiltinAssumeAligned(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_dynamic_object_size:
  case Builtin::BI__builtin_object_size:
    // Second argument (the "type" flag) must be a constant in [0, 3].
    if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3))
      return ExprError();
    break;
  case Builtin::BI__builtin_longjmp:
    if (SemaBuiltinLongjmp(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_setjmp:
    if (SemaBuiltinSetjmp(TheCall))
      return ExprError();
    break;
  case Builtin::BI_setjmp:
  case Builtin::BI_setjmpex:
    if (checkArgCount(*this, TheCall, 1))
      return true;
    break;
  case Builtin::BI__builtin_classify_type:
    if (checkArgCount(*this, TheCall, 1)) return true;
    TheCall->setType(Context.IntTy);
    break;
  case Builtin::BI__builtin_constant_p: {
    if (checkArgCount(*this, TheCall, 1)) return true;
    ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(0, Arg.get());
    TheCall->setType(Context.IntTy);
    break;
  }
  case Builtin::BI__builtin_launder:
    return SemaBuiltinLaunder(*this, TheCall);
  case Builtin::BI__sync_fetch_and_add:
  case Builtin::BI__sync_fetch_and_add_1:
  case Builtin::BI__sync_fetch_and_add_2:
  case Builtin::BI__sync_fetch_and_add_4:
  case Builtin::BI__sync_fetch_and_add_8:
  case Builtin::BI__sync_fetch_and_add_16:
  case Builtin::BI__sync_fetch_and_sub:
  case Builtin::BI__sync_fetch_and_sub_1:
  case Builtin::BI__sync_fetch_and_sub_2:
  case Builtin::BI__sync_fetch_and_sub_4:
  case Builtin::BI__sync_fetch_and_sub_8:
  case Builtin::BI__sync_fetch_and_sub_16:
  case Builtin::BI__sync_fetch_and_or:
  case Builtin::BI__sync_fetch_and_or_1:
  case Builtin::BI__sync_fetch_and_or_2:
  case Builtin::BI__sync_fetch_and_or_4:
  case Builtin::BI__sync_fetch_and_or_8:
  case Builtin::BI__sync_fetch_and_or_16:
  case Builtin::BI__sync_fetch_and_and:
  case Builtin::BI__sync_fetch_and_and_1:
  case Builtin::BI__sync_fetch_and_and_2:
  case Builtin::BI__sync_fetch_and_and_4:
  case Builtin::BI__sync_fetch_and_and_8:
  case Builtin::BI__sync_fetch_and_and_16:
  case Builtin::BI__sync_fetch_and_xor:
  case Builtin::BI__sync_fetch_and_xor_1:
  case Builtin::BI__sync_fetch_and_xor_2:
  case Builtin::BI__sync_fetch_and_xor_4:
  case Builtin::BI__sync_fetch_and_xor_8:
  case Builtin::BI__sync_fetch_and_xor_16:
  case Builtin::BI__sync_fetch_and_nand:
  case Builtin::BI__sync_fetch_and_nand_1:
  case Builtin::BI__sync_fetch_and_nand_2:
  case Builtin::BI__sync_fetch_and_nand_4:
  case Builtin::BI__sync_fetch_and_nand_8:
  case Builtin::BI__sync_fetch_and_nand_16:
  case Builtin::BI__sync_add_and_fetch:
  case Builtin::BI__sync_add_and_fetch_1:
  case Builtin::BI__sync_add_and_fetch_2:
  case Builtin::BI__sync_add_and_fetch_4:
  case Builtin::BI__sync_add_and_fetch_8:
  case Builtin::BI__sync_add_and_fetch_16:
  case Builtin::BI__sync_sub_and_fetch:
  case Builtin::BI__sync_sub_and_fetch_1:
  case Builtin::BI__sync_sub_and_fetch_2:
  case Builtin::BI__sync_sub_and_fetch_4:
  case Builtin::BI__sync_sub_and_fetch_8:
  case Builtin::BI__sync_sub_and_fetch_16:
  case Builtin::BI__sync_and_and_fetch:
  case Builtin::BI__sync_and_and_fetch_1:
  case Builtin::BI__sync_and_and_fetch_2:
  case Builtin::BI__sync_and_and_fetch_4:
  case Builtin::BI__sync_and_and_fetch_8:
  case Builtin::BI__sync_and_and_fetch_16:
  case Builtin::BI__sync_or_and_fetch:
  case Builtin::BI__sync_or_and_fetch_1:
  case Builtin::BI__sync_or_and_fetch_2:
  case Builtin::BI__sync_or_and_fetch_4:
  case Builtin::BI__sync_or_and_fetch_8:
  case Builtin::BI__sync_or_and_fetch_16:
  case Builtin::BI__sync_xor_and_fetch:
  case Builtin::BI__sync_xor_and_fetch_1:
  case Builtin::BI__sync_xor_and_fetch_2:
  case Builtin::BI__sync_xor_and_fetch_4:
  case Builtin::BI__sync_xor_and_fetch_8:
  case Builtin::BI__sync_xor_and_fetch_16:
  case Builtin::BI__sync_nand_and_fetch:
  case Builtin::BI__sync_nand_and_fetch_1:
  case Builtin::BI__sync_nand_and_fetch_2:
  case Builtin::BI__sync_nand_and_fetch_4:
  case Builtin::BI__sync_nand_and_fetch_8:
  case Builtin::BI__sync_nand_and_fetch_16:
  case Builtin::BI__sync_val_compare_and_swap:
  case Builtin::BI__sync_val_compare_and_swap_1:
  case Builtin::BI__sync_val_compare_and_swap_2:
  case Builtin::BI__sync_val_compare_and_swap_4:
  case Builtin::BI__sync_val_compare_and_swap_8:
  case Builtin::BI__sync_val_compare_and_swap_16:
  case Builtin::BI__sync_bool_compare_and_swap:
  case Builtin::BI__sync_bool_compare_and_swap_1:
  case Builtin::BI__sync_bool_compare_and_swap_2:
  case Builtin::BI__sync_bool_compare_and_swap_4:
  case Builtin::BI__sync_bool_compare_and_swap_8:
  case Builtin::BI__sync_bool_compare_and_swap_16:
  case Builtin::BI__sync_lock_test_and_set:
  case Builtin::BI__sync_lock_test_and_set_1:
  case Builtin::BI__sync_lock_test_and_set_2:
  case Builtin::BI__sync_lock_test_and_set_4:
  case Builtin::BI__sync_lock_test_and_set_8:
  case Builtin::BI__sync_lock_test_and_set_16:
  case Builtin::BI__sync_lock_release:
  case Builtin::BI__sync_lock_release_1:
  case Builtin::BI__sync_lock_release_2:
  case Builtin::BI__sync_lock_release_4:
  case Builtin::BI__sync_lock_release_8:
  case Builtin::BI__sync_lock_release_16:
  case Builtin::BI__sync_swap:
  case Builtin::BI__sync_swap_1:
  case Builtin::BI__sync_swap_2:
  case Builtin::BI__sync_swap_4:
  case Builtin::BI__sync_swap_8:
  case Builtin::BI__sync_swap_16:
    return SemaBuiltinAtomicOverloaded(TheCallResult);
  case Builtin::BI__sync_synchronize:
    Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst)
        << TheCall->getCallee()->getSourceRange();
    break;
  case Builtin::BI__builtin_nontemporal_load:
  case Builtin::BI__builtin_nontemporal_store:
    return SemaBuiltinNontemporalOverloaded(TheCallResult);
#define BUILTIN(ID, TYPE, ATTRS)
#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
  case Builtin::BI##ID: \
    return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID);
#include "clang/Basic/Builtins.def"
  case Builtin::BI__annotation:
    if (SemaBuiltinMSVCAnnotation(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_annotation:
    if (SemaBuiltinAnnotation(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_addressof:
    if (SemaBuiltinAddressof(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_is_aligned:
  case Builtin::BI__builtin_align_up:
  case Builtin::BI__builtin_align_down:
    if (SemaBuiltinAlignment(*this, TheCall, BuiltinID, false))
      return ExprError();
    break;
  case Builtin::BI__builtin_is_p2aligned:
  case Builtin::BI__builtin_p2align_up:
  case Builtin::BI__builtin_p2align_down:
    if (SemaBuiltinAlignment(*this, TheCall, BuiltinID, true))
      return ExprError();
    break;
  case Builtin::BI__builtin_add_overflow:
  case Builtin::BI__builtin_sub_overflow:
  case Builtin::BI__builtin_mul_overflow:
    if (SemaBuiltinOverflow(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_operator_new:
  case Builtin::BI__builtin_operator_delete: {
    bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete;
    ExprResult Res =
        SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete);
    if (Res.isInvalid())
      CorrectDelayedTyposInExpr(TheCallResult.get());
    return Res;
  }
  case Builtin::BI__builtin_dump_struct: {
    // We first want to ensure we are called with 2 arguments
    if (checkArgCount(*this, TheCall, 2))
      return ExprError();
    // Ensure that the first argument is of type 'struct XX *'
    const Expr *PtrArg = TheCall->getArg(0)->IgnoreParenImpCasts();
    const QualType PtrArgType = PtrArg->getType();
    if (!PtrArgType->isPointerType() ||
        !PtrArgType->getPointeeType()->isRecordType()) {
      Diag(PtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
          << PtrArgType << "structure pointer" << 1 << 0 << 3 << 1 << PtrArgType
          << "structure pointer";
      return ExprError();
    }

    // Ensure that the second argument is of type 'FunctionType'
    const Expr *FnPtrArg = TheCall->getArg(1)->IgnoreImpCasts();
    const QualType FnPtrArgType = FnPtrArg->getType();
    if (!FnPtrArgType->isPointerType()) {
      Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
          << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2
          << FnPtrArgType << "'int (*)(const char *, ...)'";
      return ExprError();
    }

    const auto *FuncType =
        FnPtrArgType->getPointeeType()->getAs<FunctionType>();

    if (!FuncType) {
      Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
          << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2
          << FnPtrArgType << "'int (*)(const char *, ...)'";
      return ExprError();
    }

    // For a prototyped callee, require a printf-shaped signature:
    // variadic, returning int, first parameter 'const char *'.
    if (const auto *FT = dyn_cast<FunctionProtoType>(FuncType)) {
      if (!FT->getNumParams()) {
        Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
            << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
            << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
        return ExprError();
      }
      QualType PT = FT->getParamType(0);
      if (!FT->isVariadic() || FT->getReturnType() != Context.IntTy ||
          !PT->isPointerType() || !PT->getPointeeType()->isCharType() ||
          !PT->getPointeeType().isConstQualified()) {
        Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
            << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
            << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
        return ExprError();
      }
    }

    TheCall->setType(Context.IntTy);
    break;
  }
  case Builtin::BI__builtin_call_with_static_chain:
    if (SemaBuiltinCallWithStaticChain(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__exception_code:
  case Builtin::BI_exception_code:
    if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope,
                                 diag::err_seh___except_block))
      return ExprError();
    break;
  case Builtin::BI__exception_info:
  case Builtin::BI_exception_info:
    if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope,
                                 diag::err_seh___except_filter))
      return ExprError();
    break;
  case Builtin::BI__GetExceptionInfo:
    if (checkArgCount(*this, TheCall, 1))
      return ExprError();

    if (CheckCXXThrowOperand(
            TheCall->getBeginLoc(),
            Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()),
            TheCall))
      return ExprError();

    TheCall->setType(Context.VoidPtrTy);
    break;
  // Memory capability functions
  case Builtin::BI__builtin_cheri_callback_create:
    return SemaBuiltinCHERICapCreate(*this, TheCall);
  // OpenCL v2.0, s6.13.16 - Pipe functions
  case Builtin::BIread_pipe:
  case Builtin::BIwrite_pipe:
    // Since those two functions are declared with var args, we need a semantic
    // check for the argument.
    if (SemaBuiltinRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIreserve_read_pipe:
  case Builtin::BIreserve_write_pipe:
  case Builtin::BIwork_group_reserve_read_pipe:
  case Builtin::BIwork_group_reserve_write_pipe:
    if (SemaBuiltinReserveRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIsub_group_reserve_read_pipe:
  case Builtin::BIsub_group_reserve_write_pipe:
    if (checkOpenCLSubgroupExt(*this, TheCall) ||
        SemaBuiltinReserveRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIcommit_read_pipe:
  case Builtin::BIcommit_write_pipe:
  case Builtin::BIwork_group_commit_read_pipe:
  case Builtin::BIwork_group_commit_write_pipe:
    if (SemaBuiltinCommitRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIsub_group_commit_read_pipe:
  case Builtin::BIsub_group_commit_write_pipe:
    if (checkOpenCLSubgroupExt(*this, TheCall) ||
        SemaBuiltinCommitRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIget_pipe_num_packets:
  case Builtin::BIget_pipe_max_packets:
    if (SemaBuiltinPipePackets(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIto_global:
  case Builtin::BIto_local:
  case Builtin::BIto_private:
    if (SemaOpenCLBuiltinToAddr(*this, BuiltinID, TheCall))
      return ExprError();
    break;
  // OpenCL v2.0, s6.13.17 - Enqueue kernel functions.
  case Builtin::BIenqueue_kernel:
    if (SemaOpenCLBuiltinEnqueueKernel(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIget_kernel_work_group_size:
  case Builtin::BIget_kernel_preferred_work_group_size_multiple:
    if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
  case Builtin::BIget_kernel_sub_group_count_for_ndrange:
    if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_os_log_format:
  case Builtin::BI__builtin_os_log_format_buffer_size:
    if (SemaBuiltinOSLogFormat(TheCall))
      return ExprError();
    break;
  }

  // Since the target specific builtins for each arch overlap, only check those
  // of the arch we are compiling for.
  if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) {
    switch (Context.getTargetInfo().getTriple().getArch()) {
    case llvm::Triple::arm:
    case llvm::Triple::armeb:
    case llvm::Triple::thumb:
    case llvm::Triple::thumbeb:
      if (CheckARMBuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    case llvm::Triple::aarch64:
    case llvm::Triple::aarch64_be:
      if (CheckAArch64BuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    case llvm::Triple::hexagon:
      if (CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    case llvm::Triple::mips:
    case llvm::Triple::mipsel:
    case llvm::Triple::mips64:
    case llvm::Triple::mips64el:
    case llvm::Triple::cheri:
      if (CheckMipsBuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    case llvm::Triple::systemz:
      if (CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    case llvm::Triple::x86:
    case llvm::Triple::x86_64:
      if (CheckX86BuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    case llvm::Triple::ppc:
    case llvm::Triple::ppc64:
    case llvm::Triple::ppc64le:
      if (CheckPPCBuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    default:
      break;
    }
  }

  return TheCallResult;
}
1677
1678// Get the valid immediate range for the specified NEON type code.
1679static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) {
1680 NeonTypeFlags Type(t);
1681 int IsQuad = ForceQuad ? true : Type.isQuad();
1682 switch (Type.getEltType()) {
1683 case NeonTypeFlags::Int8:
1684 case NeonTypeFlags::Poly8:
1685 return shift ? 7 : (8 << IsQuad) - 1;
1686 case NeonTypeFlags::Int16:
1687 case NeonTypeFlags::Poly16:
1688 return shift ? 15 : (4 << IsQuad) - 1;
1689 case NeonTypeFlags::Int32:
1690 return shift ? 31 : (2 << IsQuad) - 1;
1691 case NeonTypeFlags::Int64:
1692 case NeonTypeFlags::Poly64:
1693 return shift ? 63 : (1 << IsQuad) - 1;
1694 case NeonTypeFlags::Poly128:
1695 return shift ? 127 : (1 << IsQuad) - 1;
1696 case NeonTypeFlags::Float16:
1697 assert(!shift && "cannot shift float types!");
1698 return (4 << IsQuad) - 1;
1699 case NeonTypeFlags::Float32:
1700 assert(!shift && "cannot shift float types!");
1701 return (2 << IsQuad) - 1;
1702 case NeonTypeFlags::Float64:
1703 assert(!shift && "cannot shift float types!");
1704 return (1 << IsQuad) - 1;
1705 }
1706 llvm_unreachable("Invalid NeonTypeFlag!");
1707}
1708
1709/// getNeonEltType - Return the QualType corresponding to the elements of
1710/// the vector type specified by the NeonTypeFlags. This is used to check
1711/// the pointer arguments for Neon load/store intrinsics.
1712static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context,
1713 bool IsPolyUnsigned, bool IsInt64Long) {
1714 switch (Flags.getEltType()) {
1715 case NeonTypeFlags::Int8:
1716 return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy;
1717 case NeonTypeFlags::Int16:
1718 return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy;
1719 case NeonTypeFlags::Int32:
1720 return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy;
1721 case NeonTypeFlags::Int64:
1722 if (IsInt64Long)
1723 return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy;
1724 else
1725 return Flags.isUnsigned() ? Context.UnsignedLongLongTy
1726 : Context.LongLongTy;
1727 case NeonTypeFlags::Poly8:
1728 return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy;
1729 case NeonTypeFlags::Poly16:
1730 return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy;
1731 case NeonTypeFlags::Poly64:
1732 if (IsInt64Long)
1733 return Context.UnsignedLongTy;
1734 else
1735 return Context.UnsignedLongLongTy;
1736 case NeonTypeFlags::Poly128:
1737 break;
1738 case NeonTypeFlags::Float16:
1739 return Context.HalfTy;
1740 case NeonTypeFlags::Float32:
1741 return Context.FloatTy;
1742 case NeonTypeFlags::Float64:
1743 return Context.DoubleTy;
1744 }
1745 llvm_unreachable("Invalid NeonTypeFlag!");
1746}
1747
/// Type-check a call to a NEON builtin: validate the overload-selecting type
/// code, the type of any pointer argument, and any immediate operand that is
/// encoded in the instruction.
/// \returns true on error (a diagnostic has been emitted).
bool Sema::CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
  llvm::APSInt Result;
  // Bitmask of NeonTypeFlags values that are valid for this overloaded
  // builtin; stays zero when the builtin is not overloaded.
  uint64_t mask = 0;
  unsigned TV = 0;
  // Index of a pointer argument whose pointee must match the element type,
  // or -1 when there is none.
  int PtrArgNum = -1;
  bool HasConstPtr = false;
  // The tablegen-generated fragments populate mask/PtrArgNum/HasConstPtr for
  // each overloaded NEON/FP16 builtin.
  switch (BuiltinID) {
#define GET_NEON_OVERLOAD_CHECK
#include "clang/Basic/arm_neon.inc"
#include "clang/Basic/arm_fp16.inc"
#undef GET_NEON_OVERLOAD_CHECK
  }

  // For NEON intrinsics which are overloaded on vector element type, validate
  // the immediate which specifies which variant to emit.
  unsigned ImmArg = TheCall->getNumArgs()-1;
  if (mask) {
    if (SemaBuiltinConstantArg(TheCall, ImmArg, Result))
      return true;

    TV = Result.getLimitedValue(64);
    if ((TV > 63) || (mask & (1ULL << TV)) == 0)
      return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code)
             << TheCall->getArg(ImmArg)->getSourceRange();
  }

  if (PtrArgNum >= 0) {
    // Check that pointer arguments have the specified type.
    Expr *Arg = TheCall->getArg(PtrArgNum);
    if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg))
      Arg = ICE->getSubExpr();
    ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg);
    QualType RHSTy = RHS.get()->getType();

    llvm::Triple::ArchType Arch = Context.getTargetInfo().getTriple().getArch();
    // On AArch64, polynomial element types are unsigned; on 32-bit ARM they
    // are signed.
    bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 ||
                          Arch == llvm::Triple::aarch64_be;
    bool IsInt64Long =
        Context.getTargetInfo().getInt64Type() == TargetInfo::SignedLong;
    QualType EltTy =
        getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long);
    if (HasConstPtr)
      EltTy = EltTy.withConst();
    QualType LHSTy = Context.getPointerType(EltTy);
    // Diagnose the argument as if it were being assigned to a parameter of
    // the expected pointer type.
    AssignConvertType ConvTy;
    ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS);
    if (RHS.isInvalid())
      return true;
    if (DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy,
                                 RHS.get(), AA_Assigning))
      return true;
  }

  // For NEON intrinsics which take an immediate value as part of the
  // instruction, range check them here.
  unsigned i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default:
    return false;
  #define GET_NEON_IMMEDIATE_CHECK
  #include "clang/Basic/arm_neon.inc"
  #include "clang/Basic/arm_fp16.inc"
  #undef GET_NEON_IMMEDIATE_CHECK
  }

  // The generated cases set i (argument index), l (lower bound) and u
  // (range width), so the valid range is [l, u + l].
  return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
}
1815
/// Type-check a call to one of the ARM/AArch64 exclusive or acquire/release
/// load/store builtins (__builtin_arm_{ldrex,ldaex,strex,stlex}) and insert
/// the implicit casts their custom type-checking requires.
///
/// \param MaxWidth the widest value, in bits, the target supports for these
///        accesses (64 for ARM, 128 for AArch64).
/// \returns true on error (a diagnostic has been emitted).
bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
                                        unsigned MaxWidth) {
  assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
          BuiltinID == ARM::BI__builtin_arm_ldaex ||
          BuiltinID == ARM::BI__builtin_arm_strex ||
          BuiltinID == ARM::BI__builtin_arm_stlex ||
          BuiltinID == AArch64::BI__builtin_arm_ldrex ||
          BuiltinID == AArch64::BI__builtin_arm_ldaex ||
          BuiltinID == AArch64::BI__builtin_arm_strex ||
          BuiltinID == AArch64::BI__builtin_arm_stlex) &&
         "unexpected ARM builtin");
  // Loads (ldrex/ldaex) take one argument (the pointer); stores take two
  // (value, pointer).
  bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex ||
                 BuiltinID == ARM::BI__builtin_arm_ldaex ||
                 BuiltinID == AArch64::BI__builtin_arm_ldrex ||
                 BuiltinID == AArch64::BI__builtin_arm_ldaex;

  DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());

  // Ensure that we have the proper number of arguments.
  if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2))
    return true;

  // Inspect the pointer argument of the atomic builtin. This should always be
  // a pointer type, whose element is an integral scalar or pointer type.
  // Because it is a pointer type, we don't have to worry about any implicit
  // casts here.
  Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1);
  ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg);
  if (PointerArgRes.isInvalid())
    return true;
  PointerArg = PointerArgRes.get();

  const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
  if (!pointerType) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next
  // task is to insert the appropriate casts into the AST. First work out just
  // what the appropriate type is.
  QualType ValType = pointerType->getPointeeType();
  QualType AddrType = ValType.getUnqualifiedType().withVolatile();
  if (IsLdrex)
    AddrType.addConst();

  // Issue a warning if the cast is dodgy.
  CastKind CastNeeded = CK_NoOp;
  if (!AddrType.isAtLeastAsQualifiedAs(ValType)) {
    CastNeeded = CK_BitCast;
    Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers)
        << PointerArg->getType() << Context.getPointerType(AddrType)
        << AA_Passing << PointerArg->getSourceRange();
  }

  // Finally, do the cast and replace the argument with the corrected version.
  AddrType = Context.getPointerType(AddrType);
  PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded);
  if (PointerArgRes.isInvalid())
    return true;
  PointerArg = PointerArgRes.get();

  TheCall->setArg(IsLdrex ? 0 : 1, PointerArg);

  // In general, we allow ints, floats and pointers to be loaded and stored.
  if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
      !ValType->isBlockPointerType() && !ValType->isFloatingType()) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  // But ARM doesn't have instructions to deal with 128-bit versions.
  if (Context.getTypeSize(ValType) > MaxWidth) {
    assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate");
    Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  // Under ARC, ownership-qualified values cannot be accessed through these
  // builtins.
  switch (ValType.getObjCLifetime()) {
  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
    // okay
    break;

  case Qualifiers::OCL_Weak:
  case Qualifiers::OCL_Strong:
  case Qualifiers::OCL_Autoreleasing:
    Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
        << ValType << PointerArg->getSourceRange();
    return true;
  }

  // ldrex/ldaex evaluate to the loaded value; nothing more to check.
  if (IsLdrex) {
    TheCall->setType(ValType);
    return false;
  }

  // Initialize the argument to be stored.
  ExprResult ValArg = TheCall->getArg(0);
  InitializedEntity Entity = InitializedEntity::InitializeParameter(
      Context, ValType, /*consume*/ false);
  ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
  if (ValArg.isInvalid())
    return true;
  TheCall->setArg(0, ValArg.get());

  // __builtin_arm_strex always returns an int. It's marked as such in the .def,
  // but the custom checker bypasses all default analysis.
  TheCall->setType(Context.IntTy);
  return false;
}
1930
1931bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
1932 if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
1933 BuiltinID == ARM::BI__builtin_arm_ldaex ||
1934 BuiltinID == ARM::BI__builtin_arm_strex ||
1935 BuiltinID == ARM::BI__builtin_arm_stlex) {
1936 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64);
1937 }
1938
1939 if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
1940 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
1941 SemaBuiltinConstantArgRange(TheCall, 2, 0, 1);
1942 }
1943
1944 if (BuiltinID == ARM::BI__builtin_arm_rsr64 ||
1945 BuiltinID == ARM::BI__builtin_arm_wsr64)
1946 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false);
1947
1948 if (BuiltinID == ARM::BI__builtin_arm_rsr ||
1949 BuiltinID == ARM::BI__builtin_arm_rsrp ||
1950 BuiltinID == ARM::BI__builtin_arm_wsr ||
1951 BuiltinID == ARM::BI__builtin_arm_wsrp)
1952 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
1953
1954 if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall))
1955 return true;
1956
1957 // For intrinsics which take an immediate value as part of the instruction,
1958 // range check them here.
1959 // FIXME: VFP Intrinsics should error if VFP not present.
1960 switch (BuiltinID) {
1961 default: return false;
1962 case ARM::BI__builtin_arm_ssat:
1963 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32);
1964 case ARM::BI__builtin_arm_usat:
1965 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
1966 case ARM::BI__builtin_arm_ssat16:
1967 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16);
1968 case ARM::BI__builtin_arm_usat16:
1969 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
1970 case ARM::BI__builtin_arm_vcvtr_f:
1971 case ARM::BI__builtin_arm_vcvtr_d:
1972 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
1973 case ARM::BI__builtin_arm_dmb:
1974 case ARM::BI__builtin_arm_dsb:
1975 case ARM::BI__builtin_arm_isb:
1976 case ARM::BI__builtin_arm_dbg:
1977 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15);
1978 }
1979}
1980
1981bool Sema::CheckAArch64BuiltinFunctionCall(unsigned BuiltinID,
1982 CallExpr *TheCall) {
1983 if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
1984 BuiltinID == AArch64::BI__builtin_arm_ldaex ||
1985 BuiltinID == AArch64::BI__builtin_arm_strex ||
1986 BuiltinID == AArch64::BI__builtin_arm_stlex) {
1987 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128);
1988 }
1989
1990 if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
1991 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
1992 SemaBuiltinConstantArgRange(TheCall, 2, 0, 2) ||
1993 SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) ||
1994 SemaBuiltinConstantArgRange(TheCall, 4, 0, 1);
1995 }
1996
1997 if (BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
1998 BuiltinID == AArch64::BI__builtin_arm_wsr64)
1999 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
2000
2001 // Memory Tagging Extensions (MTE) Intrinsics
2002 if (BuiltinID == AArch64::BI__builtin_arm_irg ||
2003 BuiltinID == AArch64::BI__builtin_arm_addg ||
2004 BuiltinID == AArch64::BI__builtin_arm_gmi ||
2005 BuiltinID == AArch64::BI__builtin_arm_ldg ||
2006 BuiltinID == AArch64::BI__builtin_arm_stg ||
2007 BuiltinID == AArch64::BI__builtin_arm_subp) {
2008 return SemaBuiltinARMMemoryTaggingCall(BuiltinID, TheCall);
2009 }
2010
2011 if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
2012 BuiltinID == AArch64::BI__builtin_arm_rsrp ||
2013 BuiltinID == AArch64::BI__builtin_arm_wsr ||
2014 BuiltinID == AArch64::BI__builtin_arm_wsrp)
2015 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
2016
2017 // Only check the valid encoding range. Any constant in this range would be
2018 // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw
2019 // an exception for incorrect registers. This matches MSVC behavior.
2020 if (BuiltinID == AArch64::BI_ReadStatusReg ||
2021 BuiltinID == AArch64::BI_WriteStatusReg)
2022 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0x7fff);
2023
2024 if (BuiltinID == AArch64::BI__getReg)
2025 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);
2026
2027 if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall))
2028 return true;
2029
2030 // For intrinsics which take an immediate value as part of the instruction,
2031 // range check them here.
2032 unsigned i = 0, l = 0, u = 0;
2033 switch (BuiltinID) {
2034 default: return false;
2035 case AArch64::BI__builtin_arm_dmb:
2036 case AArch64::BI__builtin_arm_dsb:
2037 case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break;
2038 }
2039
2040 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
2041}
2042
2043bool Sema::CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall) {
2044 struct BuiltinAndString {
2045 unsigned BuiltinID;
2046 const char *Str;
2047 };
2048
2049 static BuiltinAndString ValidCPU[] = {
2050 { Hexagon::BI__builtin_HEXAGON_A6_vcmpbeq_notany, "v65,v66" },
2051 { Hexagon::BI__builtin_HEXAGON_A6_vminub_RdP, "v62,v65,v66" },
2052 { Hexagon::BI__builtin_HEXAGON_F2_dfadd, "v66" },
2053 { Hexagon::BI__builtin_HEXAGON_F2_dfsub, "v66" },
2054 { Hexagon::BI__builtin_HEXAGON_M2_mnaci, "v66" },
2055 { Hexagon::BI__builtin_HEXAGON_M6_vabsdiffb, "v62,v65,v66" },
2056 { Hexagon::BI__builtin_HEXAGON_M6_vabsdiffub, "v62,v65,v66" },
2057 { Hexagon::BI__builtin_HEXAGON_S2_mask, "v66" },
2058 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, "v60,v62,v65,v66" },
2059 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, "v60,v62,v65,v66" },
2060 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, "v60,v62,v65,v66" },
2061 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, "v60,v62,v65,v66" },
2062 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, "v60,v62,v65,v66" },
2063 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, "v60,v62,v65,v66" },
2064 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, "v60,v62,v65,v66" },
2065 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, "v60,v62,v65,v66" },
2066 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, "v60,v62,v65,v66" },
2067 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, "v60,v62,v65,v66" },
2068 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, "v60,v62,v65,v66" },
2069 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, "v60,v62,v65,v66" },
2070 { Hexagon::BI__builtin_HEXAGON_S6_vsplatrbp, "v62,v65,v66" },
2071 { Hexagon::BI__builtin_HEXAGON_S6_vtrunehb_ppp, "v62,v65,v66" },
2072 { Hexagon::BI__builtin_HEXAGON_S6_vtrunohb_ppp, "v62,v65,v66" },
2073 };
2074
2075 static BuiltinAndString ValidHVX[] = {
2076 { Hexagon::BI__builtin_HEXAGON_V6_hi, "v60,v62,v65,v66" },
2077 { Hexagon::BI__builtin_HEXAGON_V6_hi_128B, "v60,v62,v65,v66" },
2078 { Hexagon::BI__builtin_HEXAGON_V6_lo, "v60,v62,v65,v66" },
2079 { Hexagon::BI__builtin_HEXAGON_V6_lo_128B, "v60,v62,v65,v66" },
2080 { Hexagon::BI__builtin_HEXAGON_V6_extractw, "v60,v62,v65,v66" },
2081 { Hexagon::BI__builtin_HEXAGON_V6_extractw_128B, "v60,v62,v65,v66" },
2082 { Hexagon::BI__builtin_HEXAGON_V6_lvsplatb, "v62,v65,v66" },
2083 { Hexagon::BI__builtin_HEXAGON_V6_lvsplatb_128B, "v62,v65,v66" },
2084 { Hexagon::BI__builtin_HEXAGON_V6_lvsplath, "v62,v65,v66" },
2085 { Hexagon::BI__builtin_HEXAGON_V6_lvsplath_128B, "v62,v65,v66" },
2086 { Hexagon::BI__builtin_HEXAGON_V6_lvsplatw, "v60,v62,v65,v66" },
2087 { Hexagon::BI__builtin_HEXAGON_V6_lvsplatw_128B, "v60,v62,v65,v66" },
2088 { Hexagon::BI__builtin_HEXAGON_V6_pred_and, "v60,v62,v65,v66" },
2089 { Hexagon::BI__builtin_HEXAGON_V6_pred_and_128B, "v60,v62,v65,v66" },
2090 { Hexagon::BI__builtin_HEXAGON_V6_pred_and_n, "v60,v62,v65,v66" },
2091 { Hexagon::BI__builtin_HEXAGON_V6_pred_and_n_128B, "v60,v62,v65,v66" },
2092 { Hexagon::BI__builtin_HEXAGON_V6_pred_not, "v60,v62,v65,v66" },
2093 { Hexagon::BI__builtin_HEXAGON_V6_pred_not_128B, "v60,v62,v65,v66" },
2094 { Hexagon::BI__builtin_HEXAGON_V6_pred_or, "v60,v62,v65,v66" },
2095 { Hexagon::BI__builtin_HEXAGON_V6_pred_or_128B, "v60,v62,v65,v66" },
2096 { Hexagon::BI__builtin_HEXAGON_V6_pred_or_n, "v60,v62,v65,v66" },
2097 { Hexagon::BI__builtin_HEXAGON_V6_pred_or_n_128B, "v60,v62,v65,v66" },
2098 { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2, "v60,v62,v65,v66" },
2099 { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2_128B, "v60,v62,v65,v66" },
2100 { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2v2, "v62,v65,v66" },
2101 { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2v2_128B, "v62,v65,v66" },
2102 { Hexagon::BI__builtin_HEXAGON_V6_pred_xor, "v60,v62,v65,v66" },
2103 { Hexagon::BI__builtin_HEXAGON_V6_pred_xor_128B, "v60,v62,v65,v66" },
2104 { Hexagon::BI__builtin_HEXAGON_V6_shuffeqh, "v62,v65,v66" },
2105 { Hexagon::BI__builtin_HEXAGON_V6_shuffeqh_128B, "v62,v65,v66" },
2106 { Hexagon::BI__builtin_HEXAGON_V6_shuffeqw, "v62,v65,v66" },
2107 { Hexagon::BI__builtin_HEXAGON_V6_shuffeqw_128B, "v62,v65,v66" },
2108 { Hexagon::BI__builtin_HEXAGON_V6_vabsb, "v65,v66" },
2109 { Hexagon::BI__builtin_HEXAGON_V6_vabsb_128B, "v65,v66" },
2110 { Hexagon::BI__builtin_HEXAGON_V6_vabsb_sat, "v65,v66" },
2111 { Hexagon::BI__builtin_HEXAGON_V6_vabsb_sat_128B, "v65,v66" },
2112 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffh, "v60,v62,v65,v66" },
2113 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffh_128B, "v60,v62,v65,v66" },
2114 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffub, "v60,v62,v65,v66" },
2115 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffub_128B, "v60,v62,v65,v66" },
2116 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffuh, "v60,v62,v65,v66" },
2117 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffuh_128B, "v60,v62,v65,v66" },
2118 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffw, "v60,v62,v65,v66" },
2119 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffw_128B, "v60,v62,v65,v66" },
2120 { Hexagon::BI__builtin_HEXAGON_V6_vabsh, "v60,v62,v65,v66" },
2121 { Hexagon::BI__builtin_HEXAGON_V6_vabsh_128B, "v60,v62,v65,v66" },
2122 { Hexagon::BI__builtin_HEXAGON_V6_vabsh_sat, "v60,v62,v65,v66" },
2123 { Hexagon::BI__builtin_HEXAGON_V6_vabsh_sat_128B, "v60,v62,v65,v66" },
2124 { Hexagon::BI__builtin_HEXAGON_V6_vabsw, "v60,v62,v65,v66" },
2125 { Hexagon::BI__builtin_HEXAGON_V6_vabsw_128B, "v60,v62,v65,v66" },
2126 { Hexagon::BI__builtin_HEXAGON_V6_vabsw_sat, "v60,v62,v65,v66" },
2127 { Hexagon::BI__builtin_HEXAGON_V6_vabsw_sat_128B, "v60,v62,v65,v66" },
2128 { Hexagon::BI__builtin_HEXAGON_V6_vaddb, "v60,v62,v65,v66" },
2129 { Hexagon::BI__builtin_HEXAGON_V6_vaddb_128B, "v60,v62,v65,v66" },
2130 { Hexagon::BI__builtin_HEXAGON_V6_vaddb_dv, "v60,v62,v65,v66" },
2131 { Hexagon::BI__builtin_HEXAGON_V6_vaddb_dv_128B, "v60,v62,v65,v66" },
2132 { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat, "v62,v65,v66" },
2133 { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_128B, "v62,v65,v66" },
2134 { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_dv, "v62,v65,v66" },
2135 { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_dv_128B, "v62,v65,v66" },
2136 { Hexagon::BI__builtin_HEXAGON_V6_vaddcarry, "v62,v65,v66" },
2137 { Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B, "v62,v65,v66" },
2138 { Hexagon::BI__builtin_HEXAGON_V6_vaddcarrysat, "v66" },
2139 { Hexagon::BI__builtin_HEXAGON_V6_vaddcarrysat_128B, "v66" },
2140 { Hexagon::BI__builtin_HEXAGON_V6_vaddclbh, "v62,v65,v66" },
2141 { Hexagon::BI__builtin_HEXAGON_V6_vaddclbh_128B, "v62,v65,v66" },
2142 { Hexagon::BI__builtin_HEXAGON_V6_vaddclbw, "v62,v65,v66" },
2143 { Hexagon::BI__builtin_HEXAGON_V6_vaddclbw_128B, "v62,v65,v66" },
2144 { Hexagon::BI__builtin_HEXAGON_V6_vaddh, "v60,v62,v65,v66" },
2145 { Hexagon::BI__builtin_HEXAGON_V6_vaddh_128B, "v60,v62,v65,v66" },
2146 { Hexagon::BI__builtin_HEXAGON_V6_vaddh_dv, "v60,v62,v65,v66" },
2147 { Hexagon::BI__builtin_HEXAGON_V6_vaddh_dv_128B, "v60,v62,v65,v66" },
2148 { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat, "v60,v62,v65,v66" },
2149 { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_128B, "v60,v62,v65,v66" },
2150 { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_dv, "v60,v62,v65,v66" },
2151 { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_dv_128B, "v60,v62,v65,v66" },
2152 { Hexagon::BI__builtin_HEXAGON_V6_vaddhw, "v60,v62,v65,v66" },
2153 { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_128B, "v60,v62,v65,v66" },
2154 { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_acc, "v62,v65,v66" },
2155 { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_acc_128B, "v62,v65,v66" },
2156 { Hexagon::BI__builtin_HEXAGON_V6_vaddubh, "v60,v62,v65,v66" },
2157 { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_128B, "v60,v62,v65,v66" },
2158 { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_acc, "v62,v65,v66" },
2159 { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_acc_128B, "v62,v65,v66" },
2160 { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat, "v60,v62,v65,v66" },
2161 { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_128B, "v60,v62,v65,v66" },
2162 { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_dv, "v60,v62,v65,v66" },
2163 { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_dv_128B, "v60,v62,v65,v66" },
2164 { Hexagon::BI__builtin_HEXAGON_V6_vaddububb_sat, "v62,v65,v66" },
2165 { Hexagon::BI__builtin_HEXAGON_V6_vaddububb_sat_128B, "v62,v65,v66" },
2166 { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat, "v60,v62,v65,v66" },
2167 { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_128B, "v60,v62,v65,v66" },
2168 { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_dv, "v60,v62,v65,v66" },
2169 { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_dv_128B, "v60,v62,v65,v66" },
2170 { Hexagon::BI__builtin_HEXAGON_V6_vadduhw, "v60,v62,v65,v66" },
2171 { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_128B, "v60,v62,v65,v66" },
2172 { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_acc, "v62,v65,v66" },
2173 { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_acc_128B, "v62,v65,v66" },
2174 { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat, "v62,v65,v66" },
2175 { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_128B, "v62,v65,v66" },
2176 { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_dv, "v62,v65,v66" },
2177 { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_dv_128B, "v62,v65,v66" },
2178 { Hexagon::BI__builtin_HEXAGON_V6_vaddw, "v60,v62,v65,v66" },
2179 { Hexagon::BI__builtin_HEXAGON_V6_vaddw_128B, "v60,v62,v65,v66" },
2180 { Hexagon::BI__builtin_HEXAGON_V6_vaddw_dv, "v60,v62,v65,v66" },
2181 { Hexagon::BI__builtin_HEXAGON_V6_vaddw_dv_128B, "v60,v62,v65,v66" },
2182 { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat, "v60,v62,v65,v66" },
2183 { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_128B, "v60,v62,v65,v66" },
2184 { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_dv, "v60,v62,v65,v66" },
2185 { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_dv_128B, "v60,v62,v65,v66" },
2186 { Hexagon::BI__builtin_HEXAGON_V6_valignb, "v60,v62,v65,v66" },
2187 { Hexagon::BI__builtin_HEXAGON_V6_valignb_128B, "v60,v62,v65,v66" },
2188 { Hexagon::BI__builtin_HEXAGON_V6_valignbi, "v60,v62,v65,v66" },
2189 { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, "v60,v62,v65,v66" },
2190 { Hexagon::BI__builtin_HEXAGON_V6_vand, "v60,v62,v65,v66" },
2191 { Hexagon::BI__builtin_HEXAGON_V6_vand_128B, "v60,v62,v65,v66" },
2192 { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt, "v62,v65,v66" },
2193 { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_128B, "v62,v65,v66" },
2194 { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_acc, "v62,v65,v66" },
2195 { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_acc_128B, "v62,v65,v66" },
2196 { Hexagon::BI__builtin_HEXAGON_V6_vandqrt, "v60,v62,v65,v66" },
2197 { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_128B, "v60,v62,v65,v66" },
2198 { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_acc, "v60,v62,v65,v66" },
2199 { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_acc_128B, "v60,v62,v65,v66" },
2200 { Hexagon::BI__builtin_HEXAGON_V6_vandvnqv, "v62,v65,v66" },
2201 { Hexagon::BI__builtin_HEXAGON_V6_vandvnqv_128B, "v62,v65,v66" },
2202 { Hexagon::BI__builtin_HEXAGON_V6_vandvqv, "v62,v65,v66" },
2203 { Hexagon::BI__builtin_HEXAGON_V6_vandvqv_128B, "v62,v65,v66" },
2204 { Hexagon::BI__builtin_HEXAGON_V6_vandvrt, "v60,v62,v65,v66" },
2205 { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_128B, "v60,v62,v65,v66" },
2206 { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_acc, "v60,v62,v65,v66" },
2207 { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_acc_128B, "v60,v62,v65,v66" },
2208 { Hexagon::BI__builtin_HEXAGON_V6_vaslh, "v60,v62,v65,v66" },
2209 { Hexagon::BI__builtin_HEXAGON_V6_vaslh_128B, "v60,v62,v65,v66" },
2210 { Hexagon::BI__builtin_HEXAGON_V6_vaslh_acc, "v65,v66" },
2211 { Hexagon::BI__builtin_HEXAGON_V6_vaslh_acc_128B, "v65,v66" },
2212 { Hexagon::BI__builtin_HEXAGON_V6_vaslhv, "v60,v62,v65,v66" },
2213 { Hexagon::BI__builtin_HEXAGON_V6_vaslhv_128B, "v60,v62,v65,v66" },
2214 { Hexagon::BI__builtin_HEXAGON_V6_vaslw, "v60,v62,v65,v66" },
2215 { Hexagon::BI__builtin_HEXAGON_V6_vaslw_128B, "v60,v62,v65,v66" },
2216 { Hexagon::BI__builtin_HEXAGON_V6_vaslw_acc, "v60,v62,v65,v66" },
2217 { Hexagon::BI__builtin_HEXAGON_V6_vaslw_acc_128B, "v60,v62,v65,v66" },
2218 { Hexagon::BI__builtin_HEXAGON_V6_vaslwv, "v60,v62,v65,v66" },
2219 { Hexagon::BI__builtin_HEXAGON_V6_vaslwv_128B, "v60,v62,v65,v66" },
2220 { Hexagon::BI__builtin_HEXAGON_V6_vasrh, "v60,v62,v65,v66" },
2221 { Hexagon::BI__builtin_HEXAGON_V6_vasrh_128B, "v60,v62,v65,v66" },
2222 { Hexagon::BI__builtin_HEXAGON_V6_vasrh_acc, "v65,v66" },
2223 { Hexagon::BI__builtin_HEXAGON_V6_vasrh_acc_128B, "v65,v66" },
2224 { Hexagon::BI__builtin_HEXAGON_V6_vasrhbrndsat, "v60,v62,v65,v66" },
2225 { Hexagon::BI__builtin_HEXAGON_V6_vasrhbrndsat_128B, "v60,v62,v65,v66" },
2226 { Hexagon::BI__builtin_HEXAGON_V6_vasrhbsat, "v62,v65,v66" },
2227 { Hexagon::BI__builtin_HEXAGON_V6_vasrhbsat_128B, "v62,v65,v66" },
2228 { Hexagon::BI__builtin_HEXAGON_V6_vasrhubrndsat, "v60,v62,v65,v66" },
2229 { Hexagon::BI__builtin_HEXAGON_V6_vasrhubrndsat_128B, "v60,v62,v65,v66" },
2230 { Hexagon::BI__builtin_HEXAGON_V6_vasrhubsat, "v60,v62,v65,v66" },
2231 { Hexagon::BI__builtin_HEXAGON_V6_vasrhubsat_128B, "v60,v62,v65,v66" },
2232 { Hexagon::BI__builtin_HEXAGON_V6_vasrhv, "v60,v62,v65,v66" },
2233 { Hexagon::BI__builtin_HEXAGON_V6_vasrhv_128B, "v60,v62,v65,v66" },
2234 { Hexagon::BI__builtin_HEXAGON_V6_vasr_into, "v66" },
2235 { Hexagon::BI__builtin_HEXAGON_V6_vasr_into_128B, "v66" },
2236 { Hexagon::BI__builtin_HEXAGON_V6_vasruhubrndsat, "v65,v66" },
2237 { Hexagon::BI__builtin_HEXAGON_V6_vasruhubrndsat_128B, "v65,v66" },
2238 { Hexagon::BI__builtin_HEXAGON_V6_vasruhubsat, "v65,v66" },
2239 { Hexagon::BI__builtin_HEXAGON_V6_vasruhubsat_128B, "v65,v66" },
2240 { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhrndsat, "v62,v65,v66" },
2241 { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhrndsat_128B, "v62,v65,v66" },
2242 { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhsat, "v65,v66" },
2243 { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhsat_128B, "v65,v66" },
2244 { Hexagon::BI__builtin_HEXAGON_V6_vasrw, "v60,v62,v65,v66" },
2245 { Hexagon::BI__builtin_HEXAGON_V6_vasrw_128B, "v60,v62,v65,v66" },
2246 { Hexagon::BI__builtin_HEXAGON_V6_vasrw_acc, "v60,v62,v65,v66" },
2247 { Hexagon::BI__builtin_HEXAGON_V6_vasrw_acc_128B, "v60,v62,v65,v66" },
2248 { Hexagon::BI__builtin_HEXAGON_V6_vasrwh, "v60,v62,v65,v66" },
2249 { Hexagon::BI__builtin_HEXAGON_V6_vasrwh_128B, "v60,v62,v65,v66" },
2250 { Hexagon::BI__builtin_HEXAGON_V6_vasrwhrndsat, "v60,v62,v65,v66" },
2251 { Hexagon::BI__builtin_HEXAGON_V6_vasrwhrndsat_128B, "v60,v62,v65,v66" },
2252 { Hexagon::BI__builtin_HEXAGON_V6_vasrwhsat, "v60,v62,v65,v66" },
2253 { Hexagon::BI__builtin_HEXAGON_V6_vasrwhsat_128B, "v60,v62,v65,v66" },
2254 { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhrndsat, "v62,v65,v66" },
2255 { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhrndsat_128B, "v62,v65,v66" },
2256 { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhsat, "v60,v62,v65,v66" },
2257 { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhsat_128B, "v60,v62,v65,v66" },
2258 { Hexagon::BI__builtin_HEXAGON_V6_vasrwv, "v60,v62,v65,v66" },
2259 { Hexagon::BI__builtin_HEXAGON_V6_vasrwv_128B, "v60,v62,v65,v66" },
2260 { Hexagon::BI__builtin_HEXAGON_V6_vassign, "v60,v62,v65,v66" },
2261 { Hexagon::BI__builtin_HEXAGON_V6_vassign_128B, "v60,v62,v65,v66" },
2262 { Hexagon::BI__builtin_HEXAGON_V6_vassignp, "v60,v62,v65,v66" },
2263 { Hexagon::BI__builtin_HEXAGON_V6_vassignp_128B, "v60,v62,v65,v66" },
2264 { Hexagon::BI__builtin_HEXAGON_V6_vavgb, "v65,v66" },
2265 { Hexagon::BI__builtin_HEXAGON_V6_vavgb_128B, "v65,v66" },
2266 { Hexagon::BI__builtin_HEXAGON_V6_vavgbrnd, "v65,v66" },
2267 { Hexagon::BI__builtin_HEXAGON_V6_vavgbrnd_128B, "v65,v66" },
2268 { Hexagon::BI__builtin_HEXAGON_V6_vavgh, "v60,v62,v65,v66" },
2269 { Hexagon::BI__builtin_HEXAGON_V6_vavgh_128B, "v60,v62,v65,v66" },
2270 { Hexagon::BI__builtin_HEXAGON_V6_vavghrnd, "v60,v62,v65,v66" },
2271 { Hexagon::BI__builtin_HEXAGON_V6_vavghrnd_128B, "v60,v62,v65,v66" },
2272 { Hexagon::BI__builtin_HEXAGON_V6_vavgub, "v60,v62,v65,v66" },
2273 { Hexagon::BI__builtin_HEXAGON_V6_vavgub_128B, "v60,v62,v65,v66" },
2274 { Hexagon::BI__builtin_HEXAGON_V6_vavgubrnd, "v60,v62,v65,v66" },
2275 { Hexagon::BI__builtin_HEXAGON_V6_vavgubrnd_128B, "v60,v62,v65,v66" },
2276 { Hexagon::BI__builtin_HEXAGON_V6_vavguh, "v60,v62,v65,v66" },
2277 { Hexagon::BI__builtin_HEXAGON_V6_vavguh_128B, "v60,v62,v65,v66" },
2278 { Hexagon::BI__builtin_HEXAGON_V6_vavguhrnd, "v60,v62,v65,v66" },
2279 { Hexagon::BI__builtin_HEXAGON_V6_vavguhrnd_128B, "v60,v62,v65,v66" },
2280 { Hexagon::BI__builtin_HEXAGON_V6_vavguw, "v65,v66" },
2281 { Hexagon::BI__builtin_HEXAGON_V6_vavguw_128B, "v65,v66" },
2282 { Hexagon::BI__builtin_HEXAGON_V6_vavguwrnd, "v65,v66" },
2283 { Hexagon::BI__builtin_HEXAGON_V6_vavguwrnd_128B, "v65,v66" },
2284 { Hexagon::BI__builtin_HEXAGON_V6_vavgw, "v60,v62,v65,v66" },
2285 { Hexagon::BI__builtin_HEXAGON_V6_vavgw_128B, "v60,v62,v65,v66" },
2286 { Hexagon::BI__builtin_HEXAGON_V6_vavgwrnd, "v60,v62,v65,v66" },
2287 { Hexagon::BI__builtin_HEXAGON_V6_vavgwrnd_128B, "v60,v62,v65,v66" },
2288 { Hexagon::BI__builtin_HEXAGON_V6_vcl0h, "v60,v62,v65,v66" },
2289 { Hexagon::BI__builtin_HEXAGON_V6_vcl0h_128B, "v60,v62,v65,v66" },
2290 { Hexagon::BI__builtin_HEXAGON_V6_vcl0w, "v60,v62,v65,v66" },
2291 { Hexagon::BI__builtin_HEXAGON_V6_vcl0w_128B, "v60,v62,v65,v66" },
2292 { Hexagon::BI__builtin_HEXAGON_V6_vcombine, "v60,v62,v65,v66" },
2293 { Hexagon::BI__builtin_HEXAGON_V6_vcombine_128B, "v60,v62,v65,v66" },
2294 { Hexagon::BI__builtin_HEXAGON_V6_vd0, "v60,v62,v65,v66" },
2295 { Hexagon::BI__builtin_HEXAGON_V6_vd0_128B, "v60,v62,v65,v66" },
2296 { Hexagon::BI__builtin_HEXAGON_V6_vdd0, "v65,v66" },
2297 { Hexagon::BI__builtin_HEXAGON_V6_vdd0_128B, "v65,v66" },
2298 { Hexagon::BI__builtin_HEXAGON_V6_vdealb, "v60,v62,v65,v66" },
2299 { Hexagon::BI__builtin_HEXAGON_V6_vdealb_128B, "v60,v62,v65,v66" },
2300 { Hexagon::BI__builtin_HEXAGON_V6_vdealb4w, "v60,v62,v65,v66" },
2301 { Hexagon::BI__builtin_HEXAGON_V6_vdealb4w_128B, "v60,v62,v65,v66" },
2302 { Hexagon::BI__builtin_HEXAGON_V6_vdealh, "v60,v62,v65,v66" },
2303 { Hexagon::BI__builtin_HEXAGON_V6_vdealh_128B, "v60,v62,v65,v66" },
2304 { Hexagon::BI__builtin_HEXAGON_V6_vdealvdd, "v60,v62,v65,v66" },
2305 { Hexagon::BI__builtin_HEXAGON_V6_vdealvdd_128B, "v60,v62,v65,v66" },
2306 { Hexagon::BI__builtin_HEXAGON_V6_vdelta, "v60,v62,v65,v66" },
2307 { Hexagon::BI__builtin_HEXAGON_V6_vdelta_128B, "v60,v62,v65,v66" },
2308 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus, "v60,v62,v65,v66" },
2309 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_128B, "v60,v62,v65,v66" },
2310 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_acc, "v60,v62,v65,v66" },
2311 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_acc_128B, "v60,v62,v65,v66" },
2312 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv, "v60,v62,v65,v66" },
2313 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_128B, "v60,v62,v65,v66" },
2314 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_acc, "v60,v62,v65,v66" },
2315 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_acc_128B, "v60,v62,v65,v66" },
2316 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb, "v60,v62,v65,v66" },
2317 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_128B, "v60,v62,v65,v66" },
2318 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_acc, "v60,v62,v65,v66" },
2319 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_acc_128B, "v60,v62,v65,v66" },
2320 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv, "v60,v62,v65,v66" },
2321 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_128B, "v60,v62,v65,v66" },
2322 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_acc, "v60,v62,v65,v66" },
2323 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_acc_128B, "v60,v62,v65,v66" },
2324 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat, "v60,v62,v65,v66" },
2325 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_128B, "v60,v62,v65,v66" },
2326 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_acc, "v60,v62,v65,v66" },
2327 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_acc_128B, "v60,v62,v65,v66" },
2328 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat, "v60,v62,v65,v66" },
2329 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_128B, "v60,v62,v65,v66" },
2330 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_acc, "v60,v62,v65,v66" },
2331 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_acc_128B, "v60,v62,v65,v66" },
2332 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat, "v60,v62,v65,v66" },
2333 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_128B, "v60,v62,v65,v66" },
2334 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_acc, "v60,v62,v65,v66" },
2335 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_acc_128B, "v60,v62,v65,v66" },
2336 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat, "v60,v62,v65,v66" },
2337 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_128B, "v60,v62,v65,v66" },
2338 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_acc, "v60,v62,v65,v66" },
2339 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_acc_128B, "v60,v62,v65,v66" },
2340 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat, "v60,v62,v65,v66" },
2341 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_128B, "v60,v62,v65,v66" },
2342 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_acc, "v60,v62,v65,v66" },
2343 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_acc_128B, "v60,v62,v65,v66" },
2344 { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh, "v60,v62,v65,v66" },
2345 { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_128B, "v60,v62,v65,v66" },
2346 { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_acc, "v60,v62,v65,v66" },
2347 { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_acc_128B, "v60,v62,v65,v66" },
2348 { Hexagon::BI__builtin_HEXAGON_V6_veqb, "v60,v62,v65,v66" },
2349 { Hexagon::BI__builtin_HEXAGON_V6_veqb_128B, "v60,v62,v65,v66" },
2350 { Hexagon::BI__builtin_HEXAGON_V6_veqb_and, "v60,v62,v65,v66" },
2351 { Hexagon::BI__builtin_HEXAGON_V6_veqb_and_128B, "v60,v62,v65,v66" },
2352 { Hexagon::BI__builtin_HEXAGON_V6_veqb_or, "v60,v62,v65,v66" },
2353 { Hexagon::BI__builtin_HEXAGON_V6_veqb_or_128B, "v60,v62,v65,v66" },
2354 { Hexagon::BI__builtin_HEXAGON_V6_veqb_xor, "v60,v62,v65,v66" },
2355 { Hexagon::BI__builtin_HEXAGON_V6_veqb_xor_128B, "v60,v62,v65,v66" },
2356 { Hexagon::BI__builtin_HEXAGON_V6_veqh, "v60,v62,v65,v66" },
2357 { Hexagon::BI__builtin_HEXAGON_V6_veqh_128B, "v60,v62,v65,v66" },
2358 { Hexagon::BI__builtin_HEXAGON_V6_veqh_and, "v60,v62,v65,v66" },
2359 { Hexagon::BI__builtin_HEXAGON_V6_veqh_and_128B, "v60,v62,v65,v66" },
2360 { Hexagon::BI__builtin_HEXAGON_V6_veqh_or, "v60,v62,v65,v66" },
2361 { Hexagon::BI__builtin_HEXAGON_V6_veqh_or_128B, "v60,v62,v65,v66" },
2362 { Hexagon::BI__builtin_HEXAGON_V6_veqh_xor, "v60,v62,v65,v66" },
2363 { Hexagon::BI__builtin_HEXAGON_V6_veqh_xor_128B, "v60,v62,v65,v66" },
2364 { Hexagon::BI__builtin_HEXAGON_V6_veqw, "v60,v62,v65,v66" },
2365 { Hexagon::BI__builtin_HEXAGON_V6_veqw_128B, "v60,v62,v65,v66" },
2366 { Hexagon::BI__builtin_HEXAGON_V6_veqw_and, "v60,v62,v65,v66" },
2367 { Hexagon::BI__builtin_HEXAGON_V6_veqw_and_128B, "v60,v62,v65,v66" },
2368 { Hexagon::BI__builtin_HEXAGON_V6_veqw_or, "v60,v62,v65,v66" },
2369 { Hexagon::BI__builtin_HEXAGON_V6_veqw_or_128B, "v60,v62,v65,v66" },
2370 { Hexagon::BI__builtin_HEXAGON_V6_veqw_xor, "v60,v62,v65,v66" },
2371 { Hexagon::BI__builtin_HEXAGON_V6_veqw_xor_128B, "v60,v62,v65,v66" },
2372 { Hexagon::BI__builtin_HEXAGON_V6_vgtb, "v60,v62,v65,v66" },
2373 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_128B, "v60,v62,v65,v66" },
2374 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_and, "v60,v62,v65,v66" },
2375 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_and_128B, "v60,v62,v65,v66" },
2376 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_or, "v60,v62,v65,v66" },
2377 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_or_128B, "v60,v62,v65,v66" },
2378 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_xor, "v60,v62,v65,v66" },
2379 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_xor_128B, "v60,v62,v65,v66" },
2380 { Hexagon::BI__builtin_HEXAGON_V6_vgth, "v60,v62,v65,v66" },
2381 { Hexagon::BI__builtin_HEXAGON_V6_vgth_128B, "v60,v62,v65,v66" },
2382 { Hexagon::BI__builtin_HEXAGON_V6_vgth_and, "v60,v62,v65,v66" },
2383 { Hexagon::BI__builtin_HEXAGON_V6_vgth_and_128B, "v60,v62,v65,v66" },
2384 { Hexagon::BI__builtin_HEXAGON_V6_vgth_or, "v60,v62,v65,v66" },
2385 { Hexagon::BI__builtin_HEXAGON_V6_vgth_or_128B, "v60,v62,v65,v66" },
2386 { Hexagon::BI__builtin_HEXAGON_V6_vgth_xor, "v60,v62,v65,v66" },
2387 { Hexagon::BI__builtin_HEXAGON_V6_vgth_xor_128B, "v60,v62,v65,v66" },
2388 { Hexagon::BI__builtin_HEXAGON_V6_vgtub, "v60,v62,v65,v66" },
2389 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_128B, "v60,v62,v65,v66" },
2390 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_and, "v60,v62,v65,v66" },
2391 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_and_128B, "v60,v62,v65,v66" },
2392 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_or, "v60,v62,v65,v66" },
2393 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_or_128B, "v60,v62,v65,v66" },
2394 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_xor, "v60,v62,v65,v66" },
2395 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_xor_128B, "v60,v62,v65,v66" },
2396 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh, "v60,v62,v65,v66" },
2397 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_128B, "v60,v62,v65,v66" },
2398 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_and, "v60,v62,v65,v66" },
2399 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_and_128B, "v60,v62,v65,v66" },
2400 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_or, "v60,v62,v65,v66" },
2401 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_or_128B, "v60,v62,v65,v66" },
2402 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_xor, "v60,v62,v65,v66" },
2403 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_xor_128B, "v60,v62,v65,v66" },
2404 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw, "v60,v62,v65,v66" },
2405 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_128B, "v60,v62,v65,v66" },
2406 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_and, "v60,v62,v65,v66" },
2407 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_and_128B, "v60,v62,v65,v66" },
2408 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_or, "v60,v62,v65,v66" },
2409 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_or_128B, "v60,v62,v65,v66" },
2410 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_xor, "v60,v62,v65,v66" },
2411 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_xor_128B, "v60,v62,v65,v66" },
2412 { Hexagon::BI__builtin_HEXAGON_V6_vgtw, "v60,v62,v65,v66" },
2413 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_128B, "v60,v62,v65,v66" },
2414 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_and, "v60,v62,v65,v66" },
2415 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_and_128B, "v60,v62,v65,v66" },
2416 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_or, "v60,v62,v65,v66" },
2417 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_or_128B, "v60,v62,v65,v66" },
2418 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_xor, "v60,v62,v65,v66" },
2419 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_xor_128B, "v60,v62,v65,v66" },
2420 { Hexagon::BI__builtin_HEXAGON_V6_vinsertwr, "v60,v62,v65,v66" },
2421 { Hexagon::BI__builtin_HEXAGON_V6_vinsertwr_128B, "v60,v62,v65,v66" },
2422 { Hexagon::BI__builtin_HEXAGON_V6_vlalignb, "v60,v62,v65,v66" },
2423 { Hexagon::BI__builtin_HEXAGON_V6_vlalignb_128B, "v60,v62,v65,v66" },
2424 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, "v60,v62,v65,v66" },
2425 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, "v60,v62,v65,v66" },
2426 { Hexagon::BI__builtin_HEXAGON_V6_vlsrb, "v62,v65,v66" },
2427 { Hexagon::BI__builtin_HEXAGON_V6_vlsrb_128B, "v62,v65,v66" },
2428 { Hexagon::BI__builtin_HEXAGON_V6_vlsrh, "v60,v62,v65,v66" },
2429 { Hexagon::BI__builtin_HEXAGON_V6_vlsrh_128B, "v60,v62,v65,v66" },
2430 { Hexagon::BI__builtin_HEXAGON_V6_vlsrhv, "v60,v62,v65,v66" },
2431 { Hexagon::BI__builtin_HEXAGON_V6_vlsrhv_128B, "v60,v62,v65,v66" },
2432 { Hexagon::BI__builtin_HEXAGON_V6_vlsrw, "v60,v62,v65,v66" },
2433 { Hexagon::BI__builtin_HEXAGON_V6_vlsrw_128B, "v60,v62,v65,v66" },
2434 { Hexagon::BI__builtin_HEXAGON_V6_vlsrwv, "v60,v62,v65,v66" },
2435 { Hexagon::BI__builtin_HEXAGON_V6_vlsrwv_128B, "v60,v62,v65,v66" },
2436 { Hexagon::BI__builtin_HEXAGON_V6_vlut4, "v65,v66" },
2437 { Hexagon::BI__builtin_HEXAGON_V6_vlut4_128B, "v65,v66" },
2438 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb, "v60,v62,v65,v66" },
2439 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_128B, "v60,v62,v65,v66" },
2440 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi, "v62,v65,v66" },
2441 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi_128B, "v62,v65,v66" },
2442 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_nm, "v62,v65,v66" },
2443 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_nm_128B, "v62,v65,v66" },
2444 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracc, "v60,v62,v65,v66" },
2445 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracc_128B, "v60,v62,v65,v66" },
2446 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci, "v62,v65,v66" },
2447 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci_128B, "v62,v65,v66" },
2448 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh, "v60,v62,v65,v66" },
2449 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_128B, "v60,v62,v65,v66" },
2450 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi, "v62,v65,v66" },
2451 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi_128B, "v62,v65,v66" },
2452 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_nm, "v62,v65,v66" },
2453 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_nm_128B, "v62,v65,v66" },
2454 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracc, "v60,v62,v65,v66" },
2455 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracc_128B, "v60,v62,v65,v66" },
2456 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci, "v62,v65,v66" },
2457 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci_128B, "v62,v65,v66" },
2458 { Hexagon::BI__builtin_HEXAGON_V6_vmaxb, "v62,v65,v66" },
2459 { Hexagon::BI__builtin_HEXAGON_V6_vmaxb_128B, "v62,v65,v66" },
2460 { Hexagon::BI__builtin_HEXAGON_V6_vmaxh, "v60,v62,v65,v66" },
2461 { Hexagon::BI__builtin_HEXAGON_V6_vmaxh_128B, "v60,v62,v65,v66" },
2462 { Hexagon::BI__builtin_HEXAGON_V6_vmaxub, "v60,v62,v65,v66" },
2463 { Hexagon::BI__builtin_HEXAGON_V6_vmaxub_128B, "v60,v62,v65,v66" },
2464 { Hexagon::BI__builtin_HEXAGON_V6_vmaxuh, "v60,v62,v65,v66" },
2465 { Hexagon::BI__builtin_HEXAGON_V6_vmaxuh_128B, "v60,v62,v65,v66" },
2466 { Hexagon::BI__builtin_HEXAGON_V6_vmaxw, "v60,v62,v65,v66" },
2467 { Hexagon::BI__builtin_HEXAGON_V6_vmaxw_128B, "v60,v62,v65,v66" },
2468 { Hexagon::BI__builtin_HEXAGON_V6_vminb, "v62,v65,v66" },
2469 { Hexagon::BI__builtin_HEXAGON_V6_vminb_128B, "v62,v65,v66" },
2470 { Hexagon::BI__builtin_HEXAGON_V6_vminh, "v60,v62,v65,v66" },
2471 { Hexagon::BI__builtin_HEXAGON_V6_vminh_128B, "v60,v62,v65,v66" },
2472 { Hexagon::BI__builtin_HEXAGON_V6_vminub, "v60,v62,v65,v66" },
2473 { Hexagon::BI__builtin_HEXAGON_V6_vminub_128B, "v60,v62,v65,v66" },
2474 { Hexagon::BI__builtin_HEXAGON_V6_vminuh, "v60,v62,v65,v66" },
2475 { Hexagon::BI__builtin_HEXAGON_V6_vminuh_128B, "v60,v62,v65,v66" },
2476 { Hexagon::BI__builtin_HEXAGON_V6_vminw, "v60,v62,v65,v66" },
2477 { Hexagon::BI__builtin_HEXAGON_V6_vminw_128B, "v60,v62,v65,v66" },
2478 { Hexagon::BI__builtin_HEXAGON_V6_vmpabus, "v60,v62,v65,v66" },
2479 { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_128B, "v60,v62,v65,v66" },
2480 { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_acc, "v60,v62,v65,v66" },
2481 { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_acc_128B, "v60,v62,v65,v66" },
2482 { Hexagon::BI__builtin_HEXAGON_V6_vmpabusv, "v60,v62,v65,v66" },
2483 { Hexagon::BI__builtin_HEXAGON_V6_vmpabusv_128B, "v60,v62,v65,v66" },
2484 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu, "v65,v66" },
2485 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_128B, "v65,v66" },
2486 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_acc, "v65,v66" },
2487 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_acc_128B, "v65,v66" },
2488 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuuv, "v60,v62,v65,v66" },
2489 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuuv_128B, "v60,v62,v65,v66" },
2490 { Hexagon::BI__builtin_HEXAGON_V6_vmpahb, "v60,v62,v65,v66" },
2491 { Hexagon::BI__builtin_HEXAGON_V6_vmpahb_128B, "v60,v62,v65,v66" },
2492 { Hexagon::BI__builtin_HEXAGON_V6_vmpahb_acc, "v60,v62,v65,v66" },
2493 { Hexagon::BI__builtin_HEXAGON_V6_vmpahb_acc_128B, "v60,v62,v65,v66" },
2494 { Hexagon::BI__builtin_HEXAGON_V6_vmpahhsat, "v65,v66" },
2495 { Hexagon::BI__builtin_HEXAGON_V6_vmpahhsat_128B, "v65,v66" },
2496 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb, "v62,v65,v66" },
2497 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_128B, "v62,v65,v66" },
2498 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_acc, "v62,v65,v66" },
2499 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_acc_128B, "v62,v65,v66" },
2500 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhuhsat, "v65,v66" },
2501 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhuhsat_128B, "v65,v66" },
2502 { Hexagon::BI__builtin_HEXAGON_V6_vmpsuhuhsat, "v65,v66" },
2503 { Hexagon::BI__builtin_HEXAGON_V6_vmpsuhuhsat_128B, "v65,v66" },
2504 { Hexagon::BI__builtin_HEXAGON_V6_vmpybus, "v60,v62,v65,v66" },
2505 { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_128B, "v60,v62,v65,v66" },
2506 { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_acc, "v60,v62,v65,v66" },
2507 { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_acc_128B, "v60,v62,v65,v66" },
2508 { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv, "v60,v62,v65,v66" },
2509 { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_128B, "v60,v62,v65,v66" },
2510 { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_acc, "v60,v62,v65,v66" },
2511 { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_acc_128B, "v60,v62,v65,v66" },
2512 { Hexagon::BI__builtin_HEXAGON_V6_vmpybv, "v60,v62,v65,v66" },
2513 { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_128B, "v60,v62,v65,v66" },
2514 { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_acc, "v60,v62,v65,v66" },
2515 { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_acc_128B, "v60,v62,v65,v66" },
2516 { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh, "v60,v62,v65,v66" },
2517 { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_128B, "v60,v62,v65,v66" },
2518 { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_64, "v62,v65,v66" },
2519 { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_64_128B, "v62,v65,v66" },
2520 { Hexagon::BI__builtin_HEXAGON_V6_vmpyh, "v60,v62,v65,v66" },
2521 { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_128B, "v60,v62,v65,v66" },
2522 { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_acc, "v65,v66" },
2523 { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_acc_128B, "v65,v66" },
2524 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsat_acc, "v60,v62,v65,v66" },
2525 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsat_acc_128B, "v60,v62,v65,v66" },
2526 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsrs, "v60,v62,v65,v66" },
2527 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsrs_128B, "v60,v62,v65,v66" },
2528 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhss, "v60,v62,v65,v66" },
2529 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhss_128B, "v60,v62,v65,v66" },
2530 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus, "v60,v62,v65,v66" },
2531 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_128B, "v60,v62,v65,v66" },
2532 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_acc, "v60,v62,v65,v66" },
2533 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_acc_128B, "v60,v62,v65,v66" },
2534 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv, "v60,v62,v65,v66" },
2535 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_128B, "v60,v62,v65,v66" },
2536 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_acc, "v60,v62,v65,v66" },
2537 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_acc_128B, "v60,v62,v65,v66" },
2538 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhvsrs, "v60,v62,v65,v66" },
2539 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhvsrs_128B, "v60,v62,v65,v66" },
2540 { Hexagon::BI__builtin_HEXAGON_V6_vmpyieoh, "v60,v62,v65,v66" },
2541 { Hexagon::BI__builtin_HEXAGON_V6_vmpyieoh_128B, "v60,v62,v65,v66" },
2542 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewh_acc, "v60,v62,v65,v66" },
2543 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewh_acc_128B, "v60,v62,v65,v66" },
2544 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh, "v60,v62,v65,v66" },
2545 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_128B, "v60,v62,v65,v66" },
2546 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_acc, "v60,v62,v65,v66" },
2547 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_acc_128B, "v60,v62,v65,v66" },
2548 { Hexagon::BI__builtin_HEXAGON_V6_vmpyih, "v60,v62,v65,v66" },
2549 { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_128B, "v60,v62,v65,v66" },
2550 { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_acc, "v60,v62,v65,v66" },
2551 { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_acc_128B, "v60,v62,v65,v66" },
2552 { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb, "v60,v62,v65,v66" },
2553 { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_128B, "v60,v62,v65,v66" },
2554 { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_acc, "v60,v62,v65,v66" },
2555 { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_acc_128B, "v60,v62,v65,v66" },
2556 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiowh, "v60,v62,v65,v66" },
2557 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiowh_128B, "v60,v62,v65,v66" },
2558 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb, "v60,v62,v65,v66" },
2559 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_128B, "v60,v62,v65,v66" },
2560 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_acc, "v60,v62,v65,v66" },
2561 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_acc_128B, "v60,v62,v65,v66" },
2562 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh, "v60,v62,v65,v66" },
2563 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_128B, "v60,v62,v65,v66" },
2564 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_acc, "v60,v62,v65,v66" },
2565 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_acc_128B, "v60,v62,v65,v66" },
2566 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub, "v62,v65,v66" },
2567 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_128B, "v62,v65,v66" },
2568 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_acc, "v62,v65,v66" },
2569 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_acc_128B, "v62,v65,v66" },
2570 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh, "v60,v62,v65,v66" },
2571 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_128B, "v60,v62,v65,v66" },
2572 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_64_acc, "v62,v65,v66" },
2573 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_64_acc_128B, "v62,v65,v66" },
2574 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd, "v60,v62,v65,v66" },
2575 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_128B, "v60,v62,v65,v66" },
2576 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_sacc, "v60,v62,v65,v66" },
2577 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_sacc_128B, "v60,v62,v65,v66" },
2578 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_sacc, "v60,v62,v65,v66" },
2579 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_sacc_128B, "v60,v62,v65,v66" },
2580 { Hexagon::BI__builtin_HEXAGON_V6_vmpyub, "v60,v62,v65,v66" },
2581 { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_128B, "v60,v62,v65,v66" },
2582 { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_acc, "v60,v62,v65,v66" },
2583 { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_acc_128B, "v60,v62,v65,v66" },
2584 { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv, "v60,v62,v65,v66" },
2585 { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_128B, "v60,v62,v65,v66" },
2586 { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_acc, "v60,v62,v65,v66" },
2587 { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_acc_128B, "v60,v62,v65,v66" },
2588 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh, "v60,v62,v65,v66" },
2589 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_128B, "v60,v62,v65,v66" },
2590 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_acc, "v60,v62,v65,v66" },
2591 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_acc_128B, "v60,v62,v65,v66" },
2592 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe, "v65,v66" },
2593 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_128B, "v65,v66" },
2594 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_acc, "v65,v66" },
2595 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_acc_128B, "v65,v66" },
2596 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv, "v60,v62,v65,v66" },
2597 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_128B, "v60,v62,v65,v66" },
2598 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_acc, "v60,v62,v65,v66" },
2599 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_acc_128B, "v60,v62,v65,v66" },
2600 { Hexagon::BI__builtin_HEXAGON_V6_vmux, "v60,v62,v65,v66" },
2601 { Hexagon::BI__builtin_HEXAGON_V6_vmux_128B, "v60,v62,v65,v66" },
2602 { Hexagon::BI__builtin_HEXAGON_V6_vnavgb, "v65,v66" },
2603 { Hexagon::BI__builtin_HEXAGON_V6_vnavgb_128B, "v65,v66" },
2604 { Hexagon::BI__builtin_HEXAGON_V6_vnavgh, "v60,v62,v65,v66" },
2605 { Hexagon::BI__builtin_HEXAGON_V6_vnavgh_128B, "v60,v62,v65,v66" },
2606 { Hexagon::BI__builtin_HEXAGON_V6_vnavgub, "v60,v62,v65,v66" },
2607 { Hexagon::BI__builtin_HEXAGON_V6_vnavgub_128B, "v60,v62,v65,v66" },
2608 { Hexagon::BI__builtin_HEXAGON_V6_vnavgw, "v60,v62,v65,v66" },
2609 { Hexagon::BI__builtin_HEXAGON_V6_vnavgw_128B, "v60,v62,v65,v66" },
2610 { Hexagon::BI__builtin_HEXAGON_V6_vnormamth, "v60,v62,v65,v66" },
2611 { Hexagon::BI__builtin_HEXAGON_V6_vnormamth_128B, "v60,v62,v65,v66" },
2612 { Hexagon::BI__builtin_HEXAGON_V6_vnormamtw, "v60,v62,v65,v66" },
2613 { Hexagon::BI__builtin_HEXAGON_V6_vnormamtw_128B, "v60,v62,v65,v66" },
2614 { Hexagon::BI__builtin_HEXAGON_V6_vnot, "v60,v62,v65,v66" },
2615 { Hexagon::BI__builtin_HEXAGON_V6_vnot_128B, "v60,v62,v65,v66" },
2616 { Hexagon::BI__builtin_HEXAGON_V6_vor, "v60,v62,v65,v66" },
2617 { Hexagon::BI__builtin_HEXAGON_V6_vor_128B, "v60,v62,v65,v66" },
2618 { Hexagon::BI__builtin_HEXAGON_V6_vpackeb, "v60,v62,v65,v66" },
2619 { Hexagon::BI__builtin_HEXAGON_V6_vpackeb_128B, "v60,v62,v65,v66" },
2620 { Hexagon::BI__builtin_HEXAGON_V6_vpackeh, "v60,v62,v65,v66" },
2621 { Hexagon::BI__builtin_HEXAGON_V6_vpackeh_128B, "v60,v62,v65,v66" },
2622 { Hexagon::BI__builtin_HEXAGON_V6_vpackhb_sat, "v60,v62,v65,v66" },
2623 { Hexagon::BI__builtin_HEXAGON_V6_vpackhb_sat_128B, "v60,v62,v65,v66" },
2624 { Hexagon::BI__builtin_HEXAGON_V6_vpackhub_sat, "v60,v62,v65,v66" },
2625 { Hexagon::BI__builtin_HEXAGON_V6_vpackhub_sat_128B, "v60,v62,v65,v66" },
2626 { Hexagon::BI__builtin_HEXAGON_V6_vpackob, "v60,v62,v65,v66" },
2627 { Hexagon::BI__builtin_HEXAGON_V6_vpackob_128B, "v60,v62,v65,v66" },
2628 { Hexagon::BI__builtin_HEXAGON_V6_vpackoh, "v60,v62,v65,v66" },
2629 { Hexagon::BI__builtin_HEXAGON_V6_vpackoh_128B, "v60,v62,v65,v66" },
2630 { Hexagon::BI__builtin_HEXAGON_V6_vpackwh_sat, "v60,v62,v65,v66" },
2631 { Hexagon::BI__builtin_HEXAGON_V6_vpackwh_sat_128B, "v60,v62,v65,v66" },
2632 { Hexagon::BI__builtin_HEXAGON_V6_vpackwuh_sat, "v60,v62,v65,v66" },
2633 { Hexagon::BI__builtin_HEXAGON_V6_vpackwuh_sat_128B, "v60,v62,v65,v66" },
2634 { Hexagon::BI__builtin_HEXAGON_V6_vpopcounth, "v60,v62,v65,v66" },
2635 { Hexagon::BI__builtin_HEXAGON_V6_vpopcounth_128B, "v60,v62,v65,v66" },
2636 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqb, "v65,v66" },
2637 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqb_128B, "v65,v66" },
2638 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqh, "v65,v66" },
2639 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqh_128B, "v65,v66" },
2640 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqw, "v65,v66" },
2641 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqw_128B, "v65,v66" },
2642 { Hexagon::BI__builtin_HEXAGON_V6_vrdelta, "v60,v62,v65,v66" },
2643 { Hexagon::BI__builtin_HEXAGON_V6_vrdelta_128B, "v60,v62,v65,v66" },
2644 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt, "v65" },
2645 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_128B, "v65" },
2646 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_acc, "v65" },
2647 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_acc_128B, "v65" },
2648 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus, "v60,v62,v65,v66" },
2649 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_128B, "v60,v62,v65,v66" },
2650 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_acc, "v60,v62,v65,v66" },
2651 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_acc_128B, "v60,v62,v65,v66" },
2652 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, "v60,v62,v65,v66" },
2653 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, "v60,v62,v65,v66" },
2654 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, "v60,v62,v65,v66" },
2655 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B, "v60,v62,v65,v66" },
2656 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv, "v60,v62,v65,v66" },
2657 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_128B, "v60,v62,v65,v66" },
2658 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_acc, "v60,v62,v65,v66" },
2659 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_acc_128B, "v60,v62,v65,v66" },
2660 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv, "v60,v62,v65,v66" },
2661 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_128B, "v60,v62,v65,v66" },
2662 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_acc, "v60,v62,v65,v66" },
2663 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_acc_128B, "v60,v62,v65,v66" },
2664 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub, "v60,v62,v65,v66" },
2665 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_128B, "v60,v62,v65,v66" },
2666 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_acc, "v60,v62,v65,v66" },
2667 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_acc_128B, "v60,v62,v65,v66" },
2668 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, "v60,v62,v65,v66" },
2669 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, "v60,v62,v65,v66" },
2670 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, "v60,v62,v65,v66" },
2671 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B, "v60,v62,v65,v66" },
2672 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt, "v65" },
2673 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_128B, "v65" },
2674 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_acc, "v65" },
2675 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_acc_128B, "v65" },
2676 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv, "v60,v62,v65,v66" },
2677 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_128B, "v60,v62,v65,v66" },
2678 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_acc, "v60,v62,v65,v66" },
2679 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_acc_128B, "v60,v62,v65,v66" },
2680 { Hexagon::BI__builtin_HEXAGON_V6_vror, "v60,v62,v65,v66" },
2681 { Hexagon::BI__builtin_HEXAGON_V6_vror_128B, "v60,v62,v65,v66" },
2682 { Hexagon::BI__builtin_HEXAGON_V6_vrotr, "v66" },
2683 { Hexagon::BI__builtin_HEXAGON_V6_vrotr_128B, "v66" },
2684 { Hexagon::BI__builtin_HEXAGON_V6_vroundhb, "v60,v62,v65,v66" },
2685 { Hexagon::BI__builtin_HEXAGON_V6_vroundhb_128B, "v60,v62,v65,v66" },
2686 { Hexagon::BI__builtin_HEXAGON_V6_vroundhub, "v60,v62,v65,v66" },
2687 { Hexagon::BI__builtin_HEXAGON_V6_vroundhub_128B, "v60,v62,v65,v66" },
2688 { Hexagon::BI__builtin_HEXAGON_V6_vrounduhub, "v62,v65,v66" },
2689 { Hexagon::BI__builtin_HEXAGON_V6_vrounduhub_128B, "v62,v65,v66" },
2690 { Hexagon::BI__builtin_HEXAGON_V6_vrounduwuh, "v62,v65,v66" },
2691 { Hexagon::BI__builtin_HEXAGON_V6_vrounduwuh_128B, "v62,v65,v66" },
2692 { Hexagon::BI__builtin_HEXAGON_V6_vroundwh, "v60,v62,v65,v66" },
2693 { Hexagon::BI__builtin_HEXAGON_V6_vroundwh_128B, "v60,v62,v65,v66" },
2694 { Hexagon::BI__builtin_HEXAGON_V6_vroundwuh, "v60,v62,v65,v66" },
2695 { Hexagon::BI__builtin_HEXAGON_V6_vroundwuh_128B, "v60,v62,v65,v66" },
2696 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, "v60,v62,v65,v66" },
2697 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, "v60,v62,v65,v66" },
2698 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, "v60,v62,v65,v66" },
2699 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B, "v60,v62,v65,v66" },
2700 { Hexagon::BI__builtin_HEXAGON_V6_vsatdw, "v66" },
2701 { Hexagon::BI__builtin_HEXAGON_V6_vsatdw_128B, "v66" },
2702 { Hexagon::BI__builtin_HEXAGON_V6_vsathub, "v60,v62,v65,v66" },
2703 { Hexagon::BI__builtin_HEXAGON_V6_vsathub_128B, "v60,v62,v65,v66" },
2704 { Hexagon::BI__builtin_HEXAGON_V6_vsatuwuh, "v62,v65,v66" },
2705 { Hexagon::BI__builtin_HEXAGON_V6_vsatuwuh_128B, "v62,v65,v66" },
2706 { Hexagon::BI__builtin_HEXAGON_V6_vsatwh, "v60,v62,v65,v66" },
2707 { Hexagon::BI__builtin_HEXAGON_V6_vsatwh_128B, "v60,v62,v65,v66" },
2708 { Hexagon::BI__builtin_HEXAGON_V6_vsb, "v60,v62,v65,v66" },
2709 { Hexagon::BI__builtin_HEXAGON_V6_vsb_128B, "v60,v62,v65,v66" },
2710 { Hexagon::BI__builtin_HEXAGON_V6_vsh, "v60,v62,v65,v66" },
2711 { Hexagon::BI__builtin_HEXAGON_V6_vsh_128B, "v60,v62,v65,v66" },
2712 { Hexagon::BI__builtin_HEXAGON_V6_vshufeh, "v60,v62,v65,v66" },
2713 { Hexagon::BI__builtin_HEXAGON_V6_vshufeh_128B, "v60,v62,v65,v66" },
2714 { Hexagon::BI__builtin_HEXAGON_V6_vshuffb, "v60,v62,v65,v66" },
2715 { Hexagon::BI__builtin_HEXAGON_V6_vshuffb_128B, "v60,v62,v65,v66" },
2716 { Hexagon::BI__builtin_HEXAGON_V6_vshuffeb, "v60,v62,v65,v66" },
2717 { Hexagon::BI__builtin_HEXAGON_V6_vshuffeb_128B, "v60,v62,v65,v66" },
2718 { Hexagon::BI__builtin_HEXAGON_V6_vshuffh, "v60,v62,v65,v66" },
2719 { Hexagon::BI__builtin_HEXAGON_V6_vshuffh_128B, "v60,v62,v65,v66" },
2720 { Hexagon::BI__builtin_HEXAGON_V6_vshuffob, "v60,v62,v65,v66" },
2721 { Hexagon::BI__builtin_HEXAGON_V6_vshuffob_128B, "v60,v62,v65,v66" },
2722 { Hexagon::BI__builtin_HEXAGON_V6_vshuffvdd, "v60,v62,v65,v66" },
2723 { Hexagon::BI__builtin_HEXAGON_V6_vshuffvdd_128B, "v60,v62,v65,v66" },
2724 { Hexagon::BI__builtin_HEXAGON_V6_vshufoeb, "v60,v62,v65,v66" },
2725 { Hexagon::BI__builtin_HEXAGON_V6_vshufoeb_128B, "v60,v62,v65,v66" },
2726 { Hexagon::BI__builtin_HEXAGON_V6_vshufoeh, "v60,v62,v65,v66" },
2727 { Hexagon::BI__builtin_HEXAGON_V6_vshufoeh_128B, "v60,v62,v65,v66" },
2728 { Hexagon::BI__builtin_HEXAGON_V6_vshufoh, "v60,v62,v65,v66" },
2729 { Hexagon::BI__builtin_HEXAGON_V6_vshufoh_128B, "v60,v62,v65,v66" },
2730 { Hexagon::BI__builtin_HEXAGON_V6_vsubb, "v60,v62,v65,v66" },
2731 { Hexagon::BI__builtin_HEXAGON_V6_vsubb_128B, "v60,v62,v65,v66" },
2732 { Hexagon::BI__builtin_HEXAGON_V6_vsubb_dv, "v60,v62,v65,v66" },
2733 { Hexagon::BI__builtin_HEXAGON_V6_vsubb_dv_128B, "v60,v62,v65,v66" },
2734 { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat, "v62,v65,v66" },
2735 { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_128B, "v62,v65,v66" },
2736 { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_dv, "v62,v65,v66" },
2737 { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_dv_128B, "v62,v65,v66" },
2738 { Hexagon::BI__builtin_HEXAGON_V6_vsubcarry, "v62,v65,v66" },
2739 { Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B, "v62,v65,v66" },
2740 { Hexagon::BI__builtin_HEXAGON_V6_vsubh, "v60,v62,v65,v66" },
2741 { Hexagon::BI__builtin_HEXAGON_V6_vsubh_128B, "v60,v62,v65,v66" },
2742 { Hexagon::BI__builtin_HEXAGON_V6_vsubh_dv, "v60,v62,v65,v66" },
2743 { Hexagon::BI__builtin_HEXAGON_V6_vsubh_dv_128B, "v60,v62,v65,v66" },
2744 { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat, "v60,v62,v65,v66" },
2745 { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_128B, "v60,v62,v65,v66" },
2746 { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_dv, "v60,v62,v65,v66" },
2747 { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_dv_128B, "v60,v62,v65,v66" },
2748 { Hexagon::BI__builtin_HEXAGON_V6_vsubhw, "v60,v62,v65,v66" },
2749 { Hexagon::BI__builtin_HEXAGON_V6_vsubhw_128B, "v60,v62,v65,v66" },
2750 { Hexagon::BI__builtin_HEXAGON_V6_vsububh, "v60,v62,v65,v66" },
2751 { Hexagon::BI__builtin_HEXAGON_V6_vsububh_128B, "v60,v62,v65,v66" },
2752 { Hexagon::BI__builtin_HEXAGON_V6_vsububsat, "v60,v62,v65,v66" },
2753 { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_128B, "v60,v62,v65,v66" },
2754 { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_dv, "v60,v62,v65,v66" },
2755 { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_dv_128B, "v60,v62,v65,v66" },
2756 { Hexagon::BI__builtin_HEXAGON_V6_vsubububb_sat, "v62,v65,v66" },
2757 { Hexagon::BI__builtin_HEXAGON_V6_vsubububb_sat_128B, "v62,v65,v66" },
2758 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat, "v60,v62,v65,v66" },
2759 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_128B, "v60,v62,v65,v66" },
2760 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_dv, "v60,v62,v65,v66" },
2761 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_dv_128B, "v60,v62,v65,v66" },
2762 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhw, "v60,v62,v65,v66" },
2763 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhw_128B, "v60,v62,v65,v66" },
2764 { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat, "v62,v65,v66" },
2765 { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_128B, "v62,v65,v66" },
2766 { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_dv, "v62,v65,v66" },
2767 { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_dv_128B, "v62,v65,v66" },
2768 { Hexagon::BI__builtin_HEXAGON_V6_vsubw, "v60,v62,v65,v66" },
2769 { Hexagon::BI__builtin_HEXAGON_V6_vsubw_128B, "v60,v62,v65,v66" },
2770 { Hexagon::BI__builtin_HEXAGON_V6_vsubw_dv, "v60,v62,v65,v66" },
2771 { Hexagon::BI__builtin_HEXAGON_V6_vsubw_dv_128B, "v60,v62,v65,v66" },
2772 { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat, "v60,v62,v65,v66" },
2773 { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_128B, "v60,v62,v65,v66" },
2774 { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_dv, "v60,v62,v65,v66" },
2775 { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_dv_128B, "v60,v62,v65,v66" },
2776 { Hexagon::BI__builtin_HEXAGON_V6_vswap, "v60,v62,v65,v66" },
2777 { Hexagon::BI__builtin_HEXAGON_V6_vswap_128B, "v60,v62,v65,v66" },
2778 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb, "v60,v62,v65,v66" },
2779 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_128B, "v60,v62,v65,v66" },
2780 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_acc, "v60,v62,v65,v66" },
2781 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_acc_128B, "v60,v62,v65,v66" },
2782 { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus, "v60,v62,v65,v66" },
2783 { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_128B, "v60,v62,v65,v66" },
2784 { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_acc, "v60,v62,v65,v66" },
2785 { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_acc_128B, "v60,v62,v65,v66" },
2786 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb, "v60,v62,v65,v66" },
2787 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_128B, "v60,v62,v65,v66" },
2788 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_acc, "v60,v62,v65,v66" },
2789 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_acc_128B, "v60,v62,v65,v66" },
2790 { Hexagon::BI__builtin_HEXAGON_V6_vunpackb, "v60,v62,v65,v66" },
2791 { Hexagon::BI__builtin_HEXAGON_V6_vunpackb_128B, "v60,v62,v65,v66" },
2792 { Hexagon::BI__builtin_HEXAGON_V6_vunpackh, "v60,v62,v65,v66" },
2793 { Hexagon::BI__builtin_HEXAGON_V6_vunpackh_128B, "v60,v62,v65,v66" },
2794 { Hexagon::BI__builtin_HEXAGON_V6_vunpackob, "v60,v62,v65,v66" },
2795 { Hexagon::BI__builtin_HEXAGON_V6_vunpackob_128B, "v60,v62,v65,v66" },
2796 { Hexagon::BI__builtin_HEXAGON_V6_vunpackoh, "v60,v62,v65,v66" },
2797 { Hexagon::BI__builtin_HEXAGON_V6_vunpackoh_128B, "v60,v62,v65,v66" },
2798 { Hexagon::BI__builtin_HEXAGON_V6_vunpackub, "v60,v62,v65,v66" },
2799 { Hexagon::BI__builtin_HEXAGON_V6_vunpackub_128B, "v60,v62,v65,v66" },
2800 { Hexagon::BI__builtin_HEXAGON_V6_vunpackuh, "v60,v62,v65,v66" },
2801 { Hexagon::BI__builtin_HEXAGON_V6_vunpackuh_128B, "v60,v62,v65,v66" },
2802 { Hexagon::BI__builtin_HEXAGON_V6_vxor, "v60,v62,v65,v66" },
2803 { Hexagon::BI__builtin_HEXAGON_V6_vxor_128B, "v60,v62,v65,v66" },
2804 { Hexagon::BI__builtin_HEXAGON_V6_vzb, "v60,v62,v65,v66" },
2805 { Hexagon::BI__builtin_HEXAGON_V6_vzb_128B, "v60,v62,v65,v66" },
2806 { Hexagon::BI__builtin_HEXAGON_V6_vzh, "v60,v62,v65,v66" },
2807 { Hexagon::BI__builtin_HEXAGON_V6_vzh_128B, "v60,v62,v65,v66" },
2808 };
2809
2810 // Sort the tables on first execution so we can binary search them.
2811 auto SortCmp = [](const BuiltinAndString &LHS, const BuiltinAndString &RHS) {
2812 return LHS.BuiltinID < RHS.BuiltinID;
2813 };
2814 static const bool SortOnce =
2815 (llvm::sort(ValidCPU, SortCmp),
2816 llvm::sort(ValidHVX, SortCmp), true);
2817 (void)SortOnce;
2818 auto LowerBoundCmp = [](const BuiltinAndString &BI, unsigned BuiltinID) {
2819 return BI.BuiltinID < BuiltinID;
2820 };
2821
2822 const TargetInfo &TI = Context.getTargetInfo();
2823
2824 const BuiltinAndString *FC =
2825 std::lower_bound(std::begin(ValidCPU), std::end(ValidCPU), BuiltinID,
2826 LowerBoundCmp);
2827 if (FC != std::end(ValidCPU) && FC->BuiltinID == BuiltinID) {
2828 const TargetOptions &Opts = TI.getTargetOpts();
2829 StringRef CPU = Opts.CPU;
2830 if (!CPU.empty()) {
2831 assert(CPU.startswith("hexagon") && "Unexpected CPU name");
2832 CPU.consume_front("hexagon");
2833 SmallVector<StringRef, 3> CPUs;
2834 StringRef(FC->Str).split(CPUs, ',');
2835 if (llvm::none_of(CPUs, [CPU](StringRef S) { return S == CPU; }))
2836 return Diag(TheCall->getBeginLoc(),
2837 diag::err_hexagon_builtin_unsupported_cpu);
2838 }
2839 }
2840
2841 const BuiltinAndString *FH =
2842 std::lower_bound(std::begin(ValidHVX), std::end(ValidHVX), BuiltinID,
2843 LowerBoundCmp);
2844 if (FH != std::end(ValidHVX) && FH->BuiltinID == BuiltinID) {
2845 if (!TI.hasFeature("hvx"))
2846 return Diag(TheCall->getBeginLoc(),
2847 diag::err_hexagon_builtin_requires_hvx);
2848
2849 SmallVector<StringRef, 3> HVXs;
2850 StringRef(FH->Str).split(HVXs, ',');
2851 bool IsValid = llvm::any_of(HVXs,
2852 [&TI] (StringRef V) {
2853 std::string F = "hvx" + V.str();
2854 return TI.hasFeature(F);
2855 });
2856 if (!IsValid)
2857 return Diag(TheCall->getBeginLoc(),
2858 diag::err_hexagon_builtin_unsupported_hvx);
2859 }
2860
2861 return false;
2862}
2863
// Check that the immediate (constant) operands of a Hexagon builtin call
// satisfy the bit-width, signedness, and alignment constraints imposed by
// the underlying instruction encoding.  Returns true iff a diagnostic was
// emitted.
bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
  // Constraint on a single immediate operand of a builtin.
  struct ArgInfo {
    uint8_t OpNum;    // Index of the call argument being constrained.
    bool IsSigned;    // True if the immediate is interpreted as signed.
    uint8_t BitWidth; // Number of significant bits; 0 marks an unused slot.
    uint8_t Align;    // log2 of the required multiple (also scales the range).
  };
  // A builtin with up to two constrained operands.  Slots left out of the
  // initializers below are zero-initialized and skipped (BitWidth == 0).
  struct BuiltinInfo {
    unsigned BuiltinID;
    ArgInfo Infos[2];
  };

  static BuiltinInfo Infos[] = {
    { Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} },
    { Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} },
    { Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} },
    { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 0 }} },
    { Hexagon::BI__builtin_circ_ldb, {{ 3, true, 4, 0 }} },
    { Hexagon::BI__builtin_circ_ldub, {{ 3, true, 4, 0 }} },
    { Hexagon::BI__builtin_circ_std, {{ 3, true, 4, 3 }} },
    { Hexagon::BI__builtin_circ_stw, {{ 3, true, 4, 2 }} },
    { Hexagon::BI__builtin_circ_sth, {{ 3, true, 4, 1 }} },
    { Hexagon::BI__builtin_circ_sthhi, {{ 3, true, 4, 1 }} },
    { Hexagon::BI__builtin_circ_stb, {{ 3, true, 4, 0 }} },

    { Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci, {{ 1, true, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci, {{ 1, true, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci, {{ 1, true, 4, 1 }} },
    { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci, {{ 1, true, 4, 1 }} },
    { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci, {{ 1, true, 4, 2 }} },
    { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci, {{ 1, true, 4, 3 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci, {{ 1, true, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci, {{ 1, true, 4, 1 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_storerf_pci, {{ 1, true, 4, 1 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci, {{ 1, true, 4, 2 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci, {{ 1, true, 4, 3 }} },

    { Hexagon::BI__builtin_HEXAGON_A2_combineii, {{ 1, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A2_tfrih, {{ 1, false, 16, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A2_tfril, {{ 1, false, 16, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A2_tfrpi, {{ 0, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_bitspliti, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi, {{ 1, false, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti, {{ 1, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_cround_ri, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_round_ri, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi, {{ 1, false, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti, {{ 1, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui, {{ 1, false, 7, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi, {{ 1, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti, {{ 1, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui, {{ 1, false, 7, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi, {{ 1, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti, {{ 1, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui, {{ 1, false, 7, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_C2_bitsclri, {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_C2_muxii, {{ 2, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri, {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_dfclass, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n, {{ 0, false, 10, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_dfimm_p, {{ 0, false, 10, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_sfclass, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n, {{ 0, false, 10, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p, {{ 0, false, 10, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri, {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p, {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh, {{ 1, false, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p, {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax,
      {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd, {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax,
      {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh, {{ 1, false, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_extractu, {{ 1, false, 5, 0 },
                                                 { 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_extractup, {{ 1, false, 6, 0 },
                                                  { 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_insert, {{ 2, false, 5, 0 },
                                               { 3, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_insertp, {{ 2, false, 6, 0 },
                                                { 3, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p, {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh, {{ 1, false, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_setbit_i, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax,
      {{ 2, false, 4, 0 },
       { 3, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax,
      {{ 2, false, 4, 0 },
       { 3, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax,
      {{ 2, false, 4, 0 },
       { 3, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax,
      {{ 2, false, 4, 0 },
       { 3, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_valignib, {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_vspliceib, {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_clbaddi, {{ 1, true , 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi, {{ 1, true, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_extract, {{ 1, false, 5, 0 },
                                                { 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_extractp, {{ 1, false, 6, 0 },
                                                 { 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_lsli, {{ 0, true, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{ 3, false, 2, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate, {{ 2, false, 2, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax,
      {{ 1, false, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat, {{ 1, false, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax,
      {{ 1, false, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{ 3, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B,
      {{ 3, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{ 3, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B,
      {{ 3, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B,
      {{ 3, false, 1, 0 }} },
  };

  // Use a dynamically initialized static to sort the table exactly once on
  // first run, so the lookup below can be a binary search.
  static const bool SortOnce =
      (llvm::sort(Infos,
                 [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) {
                   return LHS.BuiltinID < RHS.BuiltinID;
                 }),
      true);
  (void)SortOnce;

  const BuiltinInfo *F =
      std::lower_bound(std::begin(Infos), std::end(Infos), BuiltinID,
                       [](const BuiltinInfo &BI, unsigned BuiltinID) {
                         return BI.BuiltinID < BuiltinID;
                       });
  // Builtins without an entry have no immediate constraints to check.
  if (F == std::end(Infos) || F->BuiltinID != BuiltinID)
    return false;

  bool Error = false;

  for (const ArgInfo &A : F->Infos) {
    // Ignore empty ArgInfo elements.
    if (A.BitWidth == 0)
      continue;

    // Unscaled range implied by bit-width and signedness.  BitWidth is at
    // most 16 in the table above, so these shifts cannot overflow int32_t.
    int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0;
    int32_t Max = (1 << (A.IsSigned ? A.BitWidth - 1 : A.BitWidth)) - 1;
    if (!A.Align) {
      Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max);
    } else {
      // An aligned immediate is scaled by 2^Align: widen the range
      // accordingly and also require the value to be a multiple of the
      // scale.  Deliberately use '|' (not '||') so that both diagnostics
      // are emitted when both checks fail.
      unsigned M = 1 << A.Align;
      Min *= M;
      Max *= M;
      Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max) |
               SemaBuiltinConstantArgMultiple(TheCall, A.OpNum, M);
    }
  }
  return Error;
}
3101
3102bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID,
3103 CallExpr *TheCall) {
3104 return CheckHexagonBuiltinCpu(BuiltinID, TheCall) ||
3105 CheckHexagonBuiltinArgument(BuiltinID, TheCall);
3106}
3107
3108
// CheckMipsBuiltinFunctionCall - Checks the constant value passed to the
// intrinsic is correct. The switch statement is ordered by DSP, MSA. The
// ordering for DSP is unspecified. MSA is ordered by the data format used
// by the underlying instruction i.e., df/m, df/n and then by size.
//
// FIXME: The size tests here should instead be tablegen'd along with the
// definitions from include/clang/Basic/BuiltinsMips.def.
// FIXME: GCC is strict on signedness for some of these intrinsics, we should
// be too.
bool Sema::CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
  // i = index of the immediate argument, [l, u] = allowed inclusive range,
  // m = required multiple for memory-offset immediates (0 means none).
  unsigned i = 0, l = 0, u = 0, m = 0;
  switch (BuiltinID) {
  default: return false;
  case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break;
  case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break;
  case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break;
  case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break;
  // MSA intrinsics. Instructions (which the intrinsics maps to) which use the
  // df/m field.
  // These intrinsics take an unsigned 3 bit immediate.
  case Mips::BI__builtin_msa_bclri_b:
  case Mips::BI__builtin_msa_bnegi_b:
  case Mips::BI__builtin_msa_bseti_b:
  case Mips::BI__builtin_msa_sat_s_b:
  case Mips::BI__builtin_msa_sat_u_b:
  case Mips::BI__builtin_msa_slli_b:
  case Mips::BI__builtin_msa_srai_b:
  case Mips::BI__builtin_msa_srari_b:
  case Mips::BI__builtin_msa_srli_b:
  case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break;
  case Mips::BI__builtin_msa_binsli_b:
  case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break;
  // These intrinsics take an unsigned 4 bit immediate.
  case Mips::BI__builtin_msa_bclri_h:
  case Mips::BI__builtin_msa_bnegi_h:
  case Mips::BI__builtin_msa_bseti_h:
  case Mips::BI__builtin_msa_sat_s_h:
  case Mips::BI__builtin_msa_sat_u_h:
  case Mips::BI__builtin_msa_slli_h:
  case Mips::BI__builtin_msa_srai_h:
  case Mips::BI__builtin_msa_srari_h:
  case Mips::BI__builtin_msa_srli_h:
  case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break;
  case Mips::BI__builtin_msa_binsli_h:
  case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break;
  // These intrinsics take an unsigned 5 bit immediate.
  // The first block of intrinsics actually have an unsigned 5 bit field,
  // not a df/n field.
  case Mips::BI__builtin_msa_cfcmsa:
  case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break;
  case Mips::BI__builtin_msa_clei_u_b:
  case Mips::BI__builtin_msa_clei_u_h:
  case Mips::BI__builtin_msa_clei_u_w:
  case Mips::BI__builtin_msa_clei_u_d:
  case Mips::BI__builtin_msa_clti_u_b:
  case Mips::BI__builtin_msa_clti_u_h:
  case Mips::BI__builtin_msa_clti_u_w:
  case Mips::BI__builtin_msa_clti_u_d:
  case Mips::BI__builtin_msa_maxi_u_b:
  case Mips::BI__builtin_msa_maxi_u_h:
  case Mips::BI__builtin_msa_maxi_u_w:
  case Mips::BI__builtin_msa_maxi_u_d:
  case Mips::BI__builtin_msa_mini_u_b:
  case Mips::BI__builtin_msa_mini_u_h:
  case Mips::BI__builtin_msa_mini_u_w:
  case Mips::BI__builtin_msa_mini_u_d:
  case Mips::BI__builtin_msa_addvi_b:
  case Mips::BI__builtin_msa_addvi_h:
  case Mips::BI__builtin_msa_addvi_w:
  case Mips::BI__builtin_msa_addvi_d:
  case Mips::BI__builtin_msa_bclri_w:
  case Mips::BI__builtin_msa_bnegi_w:
  case Mips::BI__builtin_msa_bseti_w:
  case Mips::BI__builtin_msa_sat_s_w:
  case Mips::BI__builtin_msa_sat_u_w:
  case Mips::BI__builtin_msa_slli_w:
  case Mips::BI__builtin_msa_srai_w:
  case Mips::BI__builtin_msa_srari_w:
  case Mips::BI__builtin_msa_srli_w:
  case Mips::BI__builtin_msa_srlri_w:
  case Mips::BI__builtin_msa_subvi_b:
  case Mips::BI__builtin_msa_subvi_h:
  case Mips::BI__builtin_msa_subvi_w:
  case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break;
  case Mips::BI__builtin_msa_binsli_w:
  case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break;
  // These intrinsics take an unsigned 6 bit immediate.
  case Mips::BI__builtin_msa_bclri_d:
  case Mips::BI__builtin_msa_bnegi_d:
  case Mips::BI__builtin_msa_bseti_d:
  case Mips::BI__builtin_msa_sat_s_d:
  case Mips::BI__builtin_msa_sat_u_d:
  case Mips::BI__builtin_msa_slli_d:
  case Mips::BI__builtin_msa_srai_d:
  case Mips::BI__builtin_msa_srari_d:
  case Mips::BI__builtin_msa_srli_d:
  case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break;
  case Mips::BI__builtin_msa_binsli_d:
  case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break;
  // These intrinsics take a signed 5 bit immediate.
  case Mips::BI__builtin_msa_ceqi_b:
  case Mips::BI__builtin_msa_ceqi_h:
  case Mips::BI__builtin_msa_ceqi_w:
  case Mips::BI__builtin_msa_ceqi_d:
  case Mips::BI__builtin_msa_clti_s_b:
  case Mips::BI__builtin_msa_clti_s_h:
  case Mips::BI__builtin_msa_clti_s_w:
  case Mips::BI__builtin_msa_clti_s_d:
  case Mips::BI__builtin_msa_clei_s_b:
  case Mips::BI__builtin_msa_clei_s_h:
  case Mips::BI__builtin_msa_clei_s_w:
  case Mips::BI__builtin_msa_clei_s_d:
  case Mips::BI__builtin_msa_maxi_s_b:
  case Mips::BI__builtin_msa_maxi_s_h:
  case Mips::BI__builtin_msa_maxi_s_w:
  case Mips::BI__builtin_msa_maxi_s_d:
  case Mips::BI__builtin_msa_mini_s_b:
  case Mips::BI__builtin_msa_mini_s_h:
  case Mips::BI__builtin_msa_mini_s_w:
  case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break;
  // These intrinsics take an unsigned 8 bit immediate.
  case Mips::BI__builtin_msa_andi_b:
  case Mips::BI__builtin_msa_nori_b:
  case Mips::BI__builtin_msa_ori_b:
  case Mips::BI__builtin_msa_shf_b:
  case Mips::BI__builtin_msa_shf_h:
  case Mips::BI__builtin_msa_shf_w:
  case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break;
  case Mips::BI__builtin_msa_bseli_b:
  case Mips::BI__builtin_msa_bmnzi_b:
  case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break;
  // df/n format
  // These intrinsics take an unsigned 4 bit immediate.
  case Mips::BI__builtin_msa_copy_s_b:
  case Mips::BI__builtin_msa_copy_u_b:
  case Mips::BI__builtin_msa_insve_b:
  case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break;
  case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break;
  // These intrinsics take an unsigned 3 bit immediate.
  case Mips::BI__builtin_msa_copy_s_h:
  case Mips::BI__builtin_msa_copy_u_h:
  case Mips::BI__builtin_msa_insve_h:
  case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break;
  case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break;
  // These intrinsics take an unsigned 2 bit immediate.
  case Mips::BI__builtin_msa_copy_s_w:
  case Mips::BI__builtin_msa_copy_u_w:
  case Mips::BI__builtin_msa_insve_w:
  case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break;
  case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break;
  // These intrinsics take an unsigned 1 bit immediate.
  case Mips::BI__builtin_msa_copy_s_d:
  case Mips::BI__builtin_msa_copy_u_d:
  case Mips::BI__builtin_msa_insve_d:
  case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break;
  case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break;
  // Memory offsets and immediate loads.
  // These intrinsics take a signed 10 bit immediate.
  // NOTE(review): ldi_b accepts [-128, 255], wider than the plain signed
  // 8-bit range -- presumably so both signed and unsigned byte values can
  // be spelled directly; confirm against the MSA specification.
  case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break;
  case Mips::BI__builtin_msa_ldi_h:
  case Mips::BI__builtin_msa_ldi_w:
  case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break;
  // ld/st offsets are element-scaled: the range is the signed 10-bit range
  // multiplied by the element size, and the offset must also be a multiple
  // of that element size (checked via m below).
  case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break;
  case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break;
  case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break;
  case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break;
  case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break;
  case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break;
  case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break;
  case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break;
  }

  if (!m)
    return SemaBuiltinConstantArgRange(TheCall, i, l, u);

  // Both the range and the multiple must hold for scaled memory offsets.
  return SemaBuiltinConstantArgRange(TheCall, i, l, u) ||
         SemaBuiltinConstantArgMultiple(TheCall, i, m);
}
3290
3291bool Sema::CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
3292 unsigned i = 0, l = 0, u = 0;
3293 bool Is64BitBltin = BuiltinID == PPC::BI__builtin_divde ||
3294 BuiltinID == PPC::BI__builtin_divdeu ||
3295 BuiltinID == PPC::BI__builtin_bpermd;
3296 bool IsTarget64Bit = Context.getTargetInfo()
3297 .getTypeWidth(Context
3298 .getTargetInfo()
3299 .getIntPtrType()) == 64;
3300 bool IsBltinExtDiv = BuiltinID == PPC::BI__builtin_divwe ||
3301 BuiltinID == PPC::BI__builtin_divweu ||
3302 BuiltinID == PPC::BI__builtin_divde ||
3303 BuiltinID == PPC::BI__builtin_divdeu;
3304
3305 if (Is64BitBltin && !IsTarget64Bit)
3306 return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt)
3307 << TheCall->getSourceRange();
3308
3309 if ((IsBltinExtDiv && !Context.getTargetInfo().hasFeature("extdiv")) ||
3310 (BuiltinID == PPC::BI__builtin_bpermd &&
3311 !Context.getTargetInfo().hasFeature("bpermd")))
3312 return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_only_on_pwr7)
3313 << TheCall->getSourceRange();
3314
3315 auto SemaVSXCheck = [&](CallExpr *TheCall) -> bool {
3316 if (!Context.getTargetInfo().hasFeature("vsx"))
3317 return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_only_on_pwr7)
3318 << TheCall->getSourceRange();
3319 return false;
3320 };
3321
3322 switch (BuiltinID) {
3323 default: return false;
3324 case PPC::BI__builtin_altivec_crypto_vshasigmaw:
3325 case PPC::BI__builtin_altivec_crypto_vshasigmad:
3326 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
3327 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
3328 case PPC::BI__builtin_tbegin:
3329 case PPC::BI__builtin_tend: i = 0; l = 0; u = 1; break;
3330 case PPC::BI__builtin_tsr: i = 0; l = 0; u = 7; break;
3331 case PPC::BI__builtin_tabortwc:
3332 case PPC::BI__builtin_tabortdc: i = 0; l = 0; u = 31; break;
3333 case PPC::BI__builtin_tabortwci:
3334 case PPC::BI__builtin_tabortdci:
3335 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) ||
3336 SemaBuiltinConstantArgRange(TheCall, 2, 0, 31);
3337 case PPC::BI__builtin_vsx_xxpermdi:
3338 case PPC::BI__builtin_vsx_xxsldwi:
3339 return SemaBuiltinVSX(TheCall);
3340 case PPC::BI__builtin_unpack_vector_int128:
3341 return SemaVSXCheck(TheCall) ||
3342 SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
3343 case PPC::BI__builtin_pack_vector_int128:
3344 return SemaVSXCheck(TheCall);
3345 }
3346 return SemaBuiltinConstantArgRange(TheCall, i, l, u);
3347}
3348
// Perform SystemZ-specific semantic checks: validate the __builtin_tabort
// abort code and range-check the immediate operands of the vector/string
// builtins.  Returns true iff a diagnostic was emitted.
bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
                                           CallExpr *TheCall) {
  if (BuiltinID == SystemZ::BI__builtin_tabort) {
    Expr *Arg = TheCall->getArg(0);
    llvm::APSInt AbortCode(32);
    // Constant abort codes in [0, 256) are rejected as invalid (per the
    // diagnostic, codes below 256 are not usable from user code).  A
    // non-constant argument is not diagnosed here.
    if (Arg->isIntegerConstantExpr(AbortCode, Context) &&
        AbortCode.getSExtValue() >= 0 && AbortCode.getSExtValue() < 256)
      return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code)
             << Arg->getSourceRange();
  }

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
  // i = index of the immediate argument, [l, u] = allowed inclusive range.
  unsigned i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default: return false;
  case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_verimb:
  case SystemZ::BI__builtin_s390_verimh:
  case SystemZ::BI__builtin_s390_verimf:
  case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break;
  case SystemZ::BI__builtin_s390_vfaeb:
  case SystemZ::BI__builtin_s390_vfaeh:
  case SystemZ::BI__builtin_s390_vfaef:
  case SystemZ::BI__builtin_s390_vfaebs:
  case SystemZ::BI__builtin_s390_vfaehs:
  case SystemZ::BI__builtin_s390_vfaefs:
  case SystemZ::BI__builtin_s390_vfaezb:
  case SystemZ::BI__builtin_s390_vfaezh:
  case SystemZ::BI__builtin_s390_vfaezf:
  case SystemZ::BI__builtin_s390_vfaezbs:
  case SystemZ::BI__builtin_s390_vfaezhs:
  case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vfisb:
  case SystemZ::BI__builtin_s390_vfidb:
    // These take two independent 4-bit immediates.
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
  case SystemZ::BI__builtin_s390_vftcisb:
  case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break;
  case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vstrcb:
  case SystemZ::BI__builtin_s390_vstrch:
  case SystemZ::BI__builtin_s390_vstrcf:
  case SystemZ::BI__builtin_s390_vstrczb:
  case SystemZ::BI__builtin_s390_vstrczh:
  case SystemZ::BI__builtin_s390_vstrczf:
  case SystemZ::BI__builtin_s390_vstrcbs:
  case SystemZ::BI__builtin_s390_vstrchs:
  case SystemZ::BI__builtin_s390_vstrcfs:
  case SystemZ::BI__builtin_s390_vstrczbs:
  case SystemZ::BI__builtin_s390_vstrczhs:
  case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vfminsb:
  case SystemZ::BI__builtin_s390_vfmaxsb:
  case SystemZ::BI__builtin_s390_vfmindb:
  case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break;
  }
  return SemaBuiltinConstantArgRange(TheCall, i, l, u);
}
3411
3412/// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *).
3413/// This checks that the target supports __builtin_cpu_supports and
3414/// that the string argument is constant and valid.
3415static bool SemaBuiltinCpuSupports(Sema &S, CallExpr *TheCall) {
3416 Expr *Arg = TheCall->getArg(0);
3417
3418 // Check if the argument is a string literal.
3419 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
3420 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
3421 << Arg->getSourceRange();
3422
3423 // Check the contents of the string.
3424 StringRef Feature =
3425 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
3426 if (!S.Context.getTargetInfo().validateCpuSupports(Feature))
3427 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_supports)
3428 << Arg->getSourceRange();
3429 return false;
3430}
3431
3432/// SemaBuiltinCpuIs - Handle __builtin_cpu_is(char *).
3433/// This checks that the target supports __builtin_cpu_is and
3434/// that the string argument is constant and valid.
3435static bool SemaBuiltinCpuIs(Sema &S, CallExpr *TheCall) {
3436 Expr *Arg = TheCall->getArg(0);
3437
3438 // Check if the argument is a string literal.
3439 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
3440 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
3441 << Arg->getSourceRange();
3442
3443 // Check the contents of the string.
3444 StringRef Feature =
3445 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
3446 if (!S.Context.getTargetInfo().validateCpuIs(Feature))
3447 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is)
3448 << Arg->getSourceRange();
3449 return false;
3450}
3451
// Check if the rounding mode is legal.
// For AVX-512 style builtins that take a rounding-mode / SAE immediate,
// verify the immediate is a constant with a legal encoding. 'ArgNum' is the
// index of that immediate operand; 'HasRC' distinguishes builtins with full
// rounding control (bits 1:0) from those that only accept SAE.
bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
  // Indicates if this instruction has rounding control or just SAE.
  bool HasRC = false;

  unsigned ArgNum = 0;
  switch (BuiltinID) {
  default:
    // Builtins not listed here carry no rounding/SAE immediate.
    return false;
  case X86::BI__builtin_ia32_vcvttsd2si32:
  case X86::BI__builtin_ia32_vcvttsd2si64:
  case X86::BI__builtin_ia32_vcvttsd2usi32:
  case X86::BI__builtin_ia32_vcvttsd2usi64:
  case X86::BI__builtin_ia32_vcvttss2si32:
  case X86::BI__builtin_ia32_vcvttss2si64:
  case X86::BI__builtin_ia32_vcvttss2usi32:
  case X86::BI__builtin_ia32_vcvttss2usi64:
    ArgNum = 1;
    break;
  case X86::BI__builtin_ia32_maxpd512:
  case X86::BI__builtin_ia32_maxps512:
  case X86::BI__builtin_ia32_minpd512:
  case X86::BI__builtin_ia32_minps512:
    ArgNum = 2;
    break;
  case X86::BI__builtin_ia32_cvtps2pd512_mask:
  case X86::BI__builtin_ia32_cvttpd2dq512_mask:
  case X86::BI__builtin_ia32_cvttpd2qq512_mask:
  case X86::BI__builtin_ia32_cvttpd2udq512_mask:
  case X86::BI__builtin_ia32_cvttpd2uqq512_mask:
  case X86::BI__builtin_ia32_cvttps2dq512_mask:
  case X86::BI__builtin_ia32_cvttps2qq512_mask:
  case X86::BI__builtin_ia32_cvttps2udq512_mask:
  case X86::BI__builtin_ia32_cvttps2uqq512_mask:
  case X86::BI__builtin_ia32_exp2pd_mask:
  case X86::BI__builtin_ia32_exp2ps_mask:
  case X86::BI__builtin_ia32_getexppd512_mask:
  case X86::BI__builtin_ia32_getexpps512_mask:
  case X86::BI__builtin_ia32_rcp28pd_mask:
  case X86::BI__builtin_ia32_rcp28ps_mask:
  case X86::BI__builtin_ia32_rsqrt28pd_mask:
  case X86::BI__builtin_ia32_rsqrt28ps_mask:
  case X86::BI__builtin_ia32_vcomisd:
  case X86::BI__builtin_ia32_vcomiss:
  case X86::BI__builtin_ia32_vcvtph2ps512_mask:
    ArgNum = 3;
    break;
  case X86::BI__builtin_ia32_cmppd512_mask:
  case X86::BI__builtin_ia32_cmpps512_mask:
  case X86::BI__builtin_ia32_cmpsd_mask:
  case X86::BI__builtin_ia32_cmpss_mask:
  case X86::BI__builtin_ia32_cvtss2sd_round_mask:
  case X86::BI__builtin_ia32_getexpsd128_round_mask:
  case X86::BI__builtin_ia32_getexpss128_round_mask:
  case X86::BI__builtin_ia32_getmantpd512_mask:
  case X86::BI__builtin_ia32_getmantps512_mask:
  case X86::BI__builtin_ia32_maxsd_round_mask:
  case X86::BI__builtin_ia32_maxss_round_mask:
  case X86::BI__builtin_ia32_minsd_round_mask:
  case X86::BI__builtin_ia32_minss_round_mask:
  case X86::BI__builtin_ia32_rcp28sd_round_mask:
  case X86::BI__builtin_ia32_rcp28ss_round_mask:
  case X86::BI__builtin_ia32_reducepd512_mask:
  case X86::BI__builtin_ia32_reduceps512_mask:
  case X86::BI__builtin_ia32_rndscalepd_mask:
  case X86::BI__builtin_ia32_rndscaleps_mask:
  case X86::BI__builtin_ia32_rsqrt28sd_round_mask:
  case X86::BI__builtin_ia32_rsqrt28ss_round_mask:
    ArgNum = 4;
    break;
  case X86::BI__builtin_ia32_fixupimmpd512_mask:
  case X86::BI__builtin_ia32_fixupimmpd512_maskz:
  case X86::BI__builtin_ia32_fixupimmps512_mask:
  case X86::BI__builtin_ia32_fixupimmps512_maskz:
  case X86::BI__builtin_ia32_fixupimmsd_mask:
  case X86::BI__builtin_ia32_fixupimmsd_maskz:
  case X86::BI__builtin_ia32_fixupimmss_mask:
  case X86::BI__builtin_ia32_fixupimmss_maskz:
  case X86::BI__builtin_ia32_getmantsd_round_mask:
  case X86::BI__builtin_ia32_getmantss_round_mask:
  case X86::BI__builtin_ia32_rangepd512_mask:
  case X86::BI__builtin_ia32_rangeps512_mask:
  case X86::BI__builtin_ia32_rangesd128_round_mask:
  case X86::BI__builtin_ia32_rangess128_round_mask:
  case X86::BI__builtin_ia32_reducesd_mask:
  case X86::BI__builtin_ia32_reducess_mask:
  case X86::BI__builtin_ia32_rndscalesd_round_mask:
  case X86::BI__builtin_ia32_rndscaless_round_mask:
    ArgNum = 5;
    break;
  // From here down the builtins accept full rounding control, not just SAE.
  case X86::BI__builtin_ia32_vcvtsd2si64:
  case X86::BI__builtin_ia32_vcvtsd2si32:
  case X86::BI__builtin_ia32_vcvtsd2usi32:
  case X86::BI__builtin_ia32_vcvtsd2usi64:
  case X86::BI__builtin_ia32_vcvtss2si32:
  case X86::BI__builtin_ia32_vcvtss2si64:
  case X86::BI__builtin_ia32_vcvtss2usi32:
  case X86::BI__builtin_ia32_vcvtss2usi64:
  case X86::BI__builtin_ia32_sqrtpd512:
  case X86::BI__builtin_ia32_sqrtps512:
    ArgNum = 1;
    HasRC = true;
    break;
  case X86::BI__builtin_ia32_addpd512:
  case X86::BI__builtin_ia32_addps512:
  case X86::BI__builtin_ia32_divpd512:
  case X86::BI__builtin_ia32_divps512:
  case X86::BI__builtin_ia32_mulpd512:
  case X86::BI__builtin_ia32_mulps512:
  case X86::BI__builtin_ia32_subpd512:
  case X86::BI__builtin_ia32_subps512:
  case X86::BI__builtin_ia32_cvtsi2sd64:
  case X86::BI__builtin_ia32_cvtsi2ss32:
  case X86::BI__builtin_ia32_cvtsi2ss64:
  case X86::BI__builtin_ia32_cvtusi2sd64:
  case X86::BI__builtin_ia32_cvtusi2ss32:
  case X86::BI__builtin_ia32_cvtusi2ss64:
    ArgNum = 2;
    HasRC = true;
    break;
  case X86::BI__builtin_ia32_cvtdq2ps512_mask:
  case X86::BI__builtin_ia32_cvtudq2ps512_mask:
  case X86::BI__builtin_ia32_cvtpd2ps512_mask:
  case X86::BI__builtin_ia32_cvtpd2dq512_mask:
  case X86::BI__builtin_ia32_cvtpd2qq512_mask:
  case X86::BI__builtin_ia32_cvtpd2udq512_mask:
  case X86::BI__builtin_ia32_cvtpd2uqq512_mask:
  case X86::BI__builtin_ia32_cvtps2dq512_mask:
  case X86::BI__builtin_ia32_cvtps2qq512_mask:
  case X86::BI__builtin_ia32_cvtps2udq512_mask:
  case X86::BI__builtin_ia32_cvtps2uqq512_mask:
  case X86::BI__builtin_ia32_cvtqq2pd512_mask:
  case X86::BI__builtin_ia32_cvtqq2ps512_mask:
  case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
  case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
    ArgNum = 3;
    HasRC = true;
    break;
  case X86::BI__builtin_ia32_addss_round_mask:
  case X86::BI__builtin_ia32_addsd_round_mask:
  case X86::BI__builtin_ia32_divss_round_mask:
  case X86::BI__builtin_ia32_divsd_round_mask:
  case X86::BI__builtin_ia32_mulss_round_mask:
  case X86::BI__builtin_ia32_mulsd_round_mask:
  case X86::BI__builtin_ia32_subss_round_mask:
  case X86::BI__builtin_ia32_subsd_round_mask:
  case X86::BI__builtin_ia32_scalefpd512_mask:
  case X86::BI__builtin_ia32_scalefps512_mask:
  case X86::BI__builtin_ia32_scalefsd_round_mask:
  case X86::BI__builtin_ia32_scalefss_round_mask:
  case X86::BI__builtin_ia32_cvtsd2ss_round_mask:
  case X86::BI__builtin_ia32_sqrtsd_round_mask:
  case X86::BI__builtin_ia32_sqrtss_round_mask:
  case X86::BI__builtin_ia32_vfmaddsd3_mask:
  case X86::BI__builtin_ia32_vfmaddsd3_maskz:
  case X86::BI__builtin_ia32_vfmaddsd3_mask3:
  case X86::BI__builtin_ia32_vfmaddss3_mask:
  case X86::BI__builtin_ia32_vfmaddss3_maskz:
  case X86::BI__builtin_ia32_vfmaddss3_mask3:
  case X86::BI__builtin_ia32_vfmaddpd512_mask:
  case X86::BI__builtin_ia32_vfmaddpd512_maskz:
  case X86::BI__builtin_ia32_vfmaddpd512_mask3:
  case X86::BI__builtin_ia32_vfmsubpd512_mask3:
  case X86::BI__builtin_ia32_vfmaddps512_mask:
  case X86::BI__builtin_ia32_vfmaddps512_maskz:
  case X86::BI__builtin_ia32_vfmaddps512_mask3:
  case X86::BI__builtin_ia32_vfmsubps512_mask3:
  case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
  case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
  case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
  case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
  case X86::BI__builtin_ia32_vfmaddsubps512_mask:
  case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
  case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
  case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
    ArgNum = 4;
    HasRC = true;
    break;
  }

  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // Make sure rounding mode is either ROUND_CUR_DIRECTION or ROUND_NO_EXC bit
  // is set. If the intrinsic has rounding control(bits 1:0), make sure its only
  // combined with ROUND_NO_EXC.
  // Legal encodings are therefore 4, 8, and — with rounding control — 8..11
  // (one of the four rounding modes in bits 1:0 OR'd with ROUND_NO_EXC).
  if (Result == 4/*ROUND_CUR_DIRECTION*/ ||
      Result == 8/*ROUND_NO_EXC*/ ||
      (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11))
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding)
         << Arg->getSourceRange();
}
3654
// Check if the gather/scatter scale is legal.
// For x86 gather/scatter builtins, the scale operand (at index 'ArgNum')
// must be a constant equal to 1, 2, 4, or 8.
bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID,
                                             CallExpr *TheCall) {
  unsigned ArgNum = 0;
  switch (BuiltinID) {
  default:
    // Builtins not listed here carry no scale immediate.
    return false;
  case X86::BI__builtin_ia32_gatherpfdpd:
  case X86::BI__builtin_ia32_gatherpfdps:
  case X86::BI__builtin_ia32_gatherpfqpd:
  case X86::BI__builtin_ia32_gatherpfqps:
  case X86::BI__builtin_ia32_scatterpfdpd:
  case X86::BI__builtin_ia32_scatterpfdps:
  case X86::BI__builtin_ia32_scatterpfqpd:
  case X86::BI__builtin_ia32_scatterpfqps:
    ArgNum = 3;
    break;
  case X86::BI__builtin_ia32_gatherd_pd:
  case X86::BI__builtin_ia32_gatherd_pd256:
  case X86::BI__builtin_ia32_gatherq_pd:
  case X86::BI__builtin_ia32_gatherq_pd256:
  case X86::BI__builtin_ia32_gatherd_ps:
  case X86::BI__builtin_ia32_gatherd_ps256:
  case X86::BI__builtin_ia32_gatherq_ps:
  case X86::BI__builtin_ia32_gatherq_ps256:
  case X86::BI__builtin_ia32_gatherd_q:
  case X86::BI__builtin_ia32_gatherd_q256:
  case X86::BI__builtin_ia32_gatherq_q:
  case X86::BI__builtin_ia32_gatherq_q256:
  case X86::BI__builtin_ia32_gatherd_d:
  case X86::BI__builtin_ia32_gatherd_d256:
  case X86::BI__builtin_ia32_gatherq_d:
  case X86::BI__builtin_ia32_gatherq_d256:
  case X86::BI__builtin_ia32_gather3div2df:
  case X86::BI__builtin_ia32_gather3div2di:
  case X86::BI__builtin_ia32_gather3div4df:
  case X86::BI__builtin_ia32_gather3div4di:
  case X86::BI__builtin_ia32_gather3div4sf:
  case X86::BI__builtin_ia32_gather3div4si:
  case X86::BI__builtin_ia32_gather3div8sf:
  case X86::BI__builtin_ia32_gather3div8si:
  case X86::BI__builtin_ia32_gather3siv2df:
  case X86::BI__builtin_ia32_gather3siv2di:
  case X86::BI__builtin_ia32_gather3siv4df:
  case X86::BI__builtin_ia32_gather3siv4di:
  case X86::BI__builtin_ia32_gather3siv4sf:
  case X86::BI__builtin_ia32_gather3siv4si:
  case X86::BI__builtin_ia32_gather3siv8sf:
  case X86::BI__builtin_ia32_gather3siv8si:
  case X86::BI__builtin_ia32_gathersiv8df:
  case X86::BI__builtin_ia32_gathersiv16sf:
  case X86::BI__builtin_ia32_gatherdiv8df:
  case X86::BI__builtin_ia32_gatherdiv16sf:
  case X86::BI__builtin_ia32_gathersiv8di:
  case X86::BI__builtin_ia32_gathersiv16si:
  case X86::BI__builtin_ia32_gatherdiv8di:
  case X86::BI__builtin_ia32_gatherdiv16si:
  case X86::BI__builtin_ia32_scatterdiv2df:
  case X86::BI__builtin_ia32_scatterdiv2di:
  case X86::BI__builtin_ia32_scatterdiv4df:
  case X86::BI__builtin_ia32_scatterdiv4di:
  case X86::BI__builtin_ia32_scatterdiv4sf:
  case X86::BI__builtin_ia32_scatterdiv4si:
  case X86::BI__builtin_ia32_scatterdiv8sf:
  case X86::BI__builtin_ia32_scatterdiv8si:
  case X86::BI__builtin_ia32_scattersiv2df:
  case X86::BI__builtin_ia32_scattersiv2di:
  case X86::BI__builtin_ia32_scattersiv4df:
  case X86::BI__builtin_ia32_scattersiv4di:
  case X86::BI__builtin_ia32_scattersiv4sf:
  case X86::BI__builtin_ia32_scattersiv4si:
  case X86::BI__builtin_ia32_scattersiv8sf:
  case X86::BI__builtin_ia32_scattersiv8si:
  case X86::BI__builtin_ia32_scattersiv8df:
  case X86::BI__builtin_ia32_scattersiv16sf:
  case X86::BI__builtin_ia32_scatterdiv8df:
  case X86::BI__builtin_ia32_scatterdiv16sf:
  case X86::BI__builtin_ia32_scattersiv8di:
  case X86::BI__builtin_ia32_scattersiv16si:
  case X86::BI__builtin_ia32_scatterdiv8di:
  case X86::BI__builtin_ia32_scatterdiv16si:
    ArgNum = 4;
    break;
  }

  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // Only the element-size multiples 1, 2, 4, and 8 are encodable scales.
  if (Result == 1 || Result == 2 || Result == 4 || Result == 8)
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_scale)
         << Arg->getSourceRange();
}
3757
3758static bool isX86_32Builtin(unsigned BuiltinID) {
3759 // These builtins only work on x86-32 targets.
3760 switch (BuiltinID) {
3761 case X86::BI__builtin_ia32_readeflags_u32:
3762 case X86::BI__builtin_ia32_writeeflags_u32:
3763 return true;
3764 }
3765
3766 return false;
3767}
3768
// Perform the x86-specific semantic checks for a builtin call: validate
// __builtin_cpu_supports / __builtin_cpu_is strings, reject 32-bit-only
// builtins on 64-bit targets, check rounding/SAE and gather/scatter scale
// immediates, and finally range-check any plain immediate operand.
bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
  if (BuiltinID == X86::BI__builtin_cpu_supports)
    return SemaBuiltinCpuSupports(*this, TheCall);

  if (BuiltinID == X86::BI__builtin_cpu_is)
    return SemaBuiltinCpuIs(*this, TheCall);

  // Check for 32-bit only builtins on a 64-bit target.
  const llvm::Triple &TT = Context.getTargetInfo().getTriple();
  if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID))
    return Diag(TheCall->getCallee()->getBeginLoc(),
                diag::err_32_bit_builtin_64_bit_tgt);

  // If the intrinsic has rounding or SAE make sure its valid.
  if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall))
    return true;

  // If the intrinsic has a gather/scatter scale immediate make sure its valid.
  if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here. 'i' is the index of the immediate operand and
  // [l, u] is its inclusive legal range.
  int i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default:
    return false;
  case X86::BI__builtin_ia32_vec_ext_v2si:
  case X86::BI__builtin_ia32_vec_ext_v2di:
  case X86::BI__builtin_ia32_vextractf128_pd256:
  case X86::BI__builtin_ia32_vextractf128_ps256:
  case X86::BI__builtin_ia32_vextractf128_si256:
  case X86::BI__builtin_ia32_extract128i256:
  case X86::BI__builtin_ia32_extractf64x4_mask:
  case X86::BI__builtin_ia32_extracti64x4_mask:
  case X86::BI__builtin_ia32_extractf32x8_mask:
  case X86::BI__builtin_ia32_extracti32x8_mask:
  case X86::BI__builtin_ia32_extractf64x2_256_mask:
  case X86::BI__builtin_ia32_extracti64x2_256_mask:
  case X86::BI__builtin_ia32_extractf32x4_256_mask:
  case X86::BI__builtin_ia32_extracti32x4_256_mask:
    i = 1; l = 0; u = 1;
    break;
  case X86::BI__builtin_ia32_vec_set_v2di:
  case X86::BI__builtin_ia32_vinsertf128_pd256:
  case X86::BI__builtin_ia32_vinsertf128_ps256:
  case X86::BI__builtin_ia32_vinsertf128_si256:
  case X86::BI__builtin_ia32_insert128i256:
  case X86::BI__builtin_ia32_insertf32x8:
  case X86::BI__builtin_ia32_inserti32x8:
  case X86::BI__builtin_ia32_insertf64x4:
  case X86::BI__builtin_ia32_inserti64x4:
  case X86::BI__builtin_ia32_insertf64x2_256:
  case X86::BI__builtin_ia32_inserti64x2_256:
  case X86::BI__builtin_ia32_insertf32x4_256:
  case X86::BI__builtin_ia32_inserti32x4_256:
    i = 2; l = 0; u = 1;
    break;
  case X86::BI__builtin_ia32_vpermilpd:
  case X86::BI__builtin_ia32_vec_ext_v4hi:
  case X86::BI__builtin_ia32_vec_ext_v4si:
  case X86::BI__builtin_ia32_vec_ext_v4sf:
  case X86::BI__builtin_ia32_vec_ext_v4di:
  case X86::BI__builtin_ia32_extractf32x4_mask:
  case X86::BI__builtin_ia32_extracti32x4_mask:
  case X86::BI__builtin_ia32_extractf64x2_512_mask:
  case X86::BI__builtin_ia32_extracti64x2_512_mask:
    i = 1; l = 0; u = 3;
    break;
  case X86::BI_mm_prefetch:
  case X86::BI__builtin_ia32_vec_ext_v8hi:
  case X86::BI__builtin_ia32_vec_ext_v8si:
    i = 1; l = 0; u = 7;
    break;
  case X86::BI__builtin_ia32_sha1rnds4:
  case X86::BI__builtin_ia32_blendpd:
  case X86::BI__builtin_ia32_shufpd:
  case X86::BI__builtin_ia32_vec_set_v4hi:
  case X86::BI__builtin_ia32_vec_set_v4si:
  case X86::BI__builtin_ia32_vec_set_v4di:
  case X86::BI__builtin_ia32_shuf_f32x4_256:
  case X86::BI__builtin_ia32_shuf_f64x2_256:
  case X86::BI__builtin_ia32_shuf_i32x4_256:
  case X86::BI__builtin_ia32_shuf_i64x2_256:
  case X86::BI__builtin_ia32_insertf64x2_512:
  case X86::BI__builtin_ia32_inserti64x2_512:
  case X86::BI__builtin_ia32_insertf32x4:
  case X86::BI__builtin_ia32_inserti32x4:
    i = 2; l = 0; u = 3;
    break;
  case X86::BI__builtin_ia32_vpermil2pd:
  case X86::BI__builtin_ia32_vpermil2pd256:
  case X86::BI__builtin_ia32_vpermil2ps:
  case X86::BI__builtin_ia32_vpermil2ps256:
    i = 3; l = 0; u = 3;
    break;
  case X86::BI__builtin_ia32_cmpb128_mask:
  case X86::BI__builtin_ia32_cmpw128_mask:
  case X86::BI__builtin_ia32_cmpd128_mask:
  case X86::BI__builtin_ia32_cmpq128_mask:
  case X86::BI__builtin_ia32_cmpb256_mask:
  case X86::BI__builtin_ia32_cmpw256_mask:
  case X86::BI__builtin_ia32_cmpd256_mask:
  case X86::BI__builtin_ia32_cmpq256_mask:
  case X86::BI__builtin_ia32_cmpb512_mask:
  case X86::BI__builtin_ia32_cmpw512_mask:
  case X86::BI__builtin_ia32_cmpd512_mask:
  case X86::BI__builtin_ia32_cmpq512_mask:
  case X86::BI__builtin_ia32_ucmpb128_mask:
  case X86::BI__builtin_ia32_ucmpw128_mask:
  case X86::BI__builtin_ia32_ucmpd128_mask:
  case X86::BI__builtin_ia32_ucmpq128_mask:
  case X86::BI__builtin_ia32_ucmpb256_mask:
  case X86::BI__builtin_ia32_ucmpw256_mask:
  case X86::BI__builtin_ia32_ucmpd256_mask:
  case X86::BI__builtin_ia32_ucmpq256_mask:
  case X86::BI__builtin_ia32_ucmpb512_mask:
  case X86::BI__builtin_ia32_ucmpw512_mask:
  case X86::BI__builtin_ia32_ucmpd512_mask:
  case X86::BI__builtin_ia32_ucmpq512_mask:
  case X86::BI__builtin_ia32_vpcomub:
  case X86::BI__builtin_ia32_vpcomuw:
  case X86::BI__builtin_ia32_vpcomud:
  case X86::BI__builtin_ia32_vpcomuq:
  case X86::BI__builtin_ia32_vpcomb:
  case X86::BI__builtin_ia32_vpcomw:
  case X86::BI__builtin_ia32_vpcomd:
  case X86::BI__builtin_ia32_vpcomq:
  case X86::BI__builtin_ia32_vec_set_v8hi:
  case X86::BI__builtin_ia32_vec_set_v8si:
    i = 2; l = 0; u = 7;
    break;
  case X86::BI__builtin_ia32_vpermilpd256:
  case X86::BI__builtin_ia32_roundps:
  case X86::BI__builtin_ia32_roundpd:
  case X86::BI__builtin_ia32_roundps256:
  case X86::BI__builtin_ia32_roundpd256:
  case X86::BI__builtin_ia32_getmantpd128_mask:
  case X86::BI__builtin_ia32_getmantpd256_mask:
  case X86::BI__builtin_ia32_getmantps128_mask:
  case X86::BI__builtin_ia32_getmantps256_mask:
  case X86::BI__builtin_ia32_getmantpd512_mask:
  case X86::BI__builtin_ia32_getmantps512_mask:
  case X86::BI__builtin_ia32_vec_ext_v16qi:
  case X86::BI__builtin_ia32_vec_ext_v16hi:
    i = 1; l = 0; u = 15;
    break;
  case X86::BI__builtin_ia32_pblendd128:
  case X86::BI__builtin_ia32_blendps:
  case X86::BI__builtin_ia32_blendpd256:
  case X86::BI__builtin_ia32_shufpd256:
  case X86::BI__builtin_ia32_roundss:
  case X86::BI__builtin_ia32_roundsd:
  case X86::BI__builtin_ia32_rangepd128_mask:
  case X86::BI__builtin_ia32_rangepd256_mask:
  case X86::BI__builtin_ia32_rangepd512_mask:
  case X86::BI__builtin_ia32_rangeps128_mask:
  case X86::BI__builtin_ia32_rangeps256_mask:
  case X86::BI__builtin_ia32_rangeps512_mask:
  case X86::BI__builtin_ia32_getmantsd_round_mask:
  case X86::BI__builtin_ia32_getmantss_round_mask:
  case X86::BI__builtin_ia32_vec_set_v16qi:
  case X86::BI__builtin_ia32_vec_set_v16hi:
    i = 2; l = 0; u = 15;
    break;
  case X86::BI__builtin_ia32_vec_ext_v32qi:
    i = 1; l = 0; u = 31;
    break;
  case X86::BI__builtin_ia32_cmpps:
  case X86::BI__builtin_ia32_cmpss:
  case X86::BI__builtin_ia32_cmppd:
  case X86::BI__builtin_ia32_cmpsd:
  case X86::BI__builtin_ia32_cmpps256:
  case X86::BI__builtin_ia32_cmppd256:
  case X86::BI__builtin_ia32_cmpps128_mask:
  case X86::BI__builtin_ia32_cmppd128_mask:
  case X86::BI__builtin_ia32_cmpps256_mask:
  case X86::BI__builtin_ia32_cmppd256_mask:
  case X86::BI__builtin_ia32_cmpps512_mask:
  case X86::BI__builtin_ia32_cmppd512_mask:
  case X86::BI__builtin_ia32_cmpsd_mask:
  case X86::BI__builtin_ia32_cmpss_mask:
  case X86::BI__builtin_ia32_vec_set_v32qi:
    i = 2; l = 0; u = 31;
    break;
  case X86::BI__builtin_ia32_permdf256:
  case X86::BI__builtin_ia32_permdi256:
  case X86::BI__builtin_ia32_permdf512:
  case X86::BI__builtin_ia32_permdi512:
  case X86::BI__builtin_ia32_vpermilps:
  case X86::BI__builtin_ia32_vpermilps256:
  case X86::BI__builtin_ia32_vpermilpd512:
  case X86::BI__builtin_ia32_vpermilps512:
  case X86::BI__builtin_ia32_pshufd:
  case X86::BI__builtin_ia32_pshufd256:
  case X86::BI__builtin_ia32_pshufd512:
  case X86::BI__builtin_ia32_pshufhw:
  case X86::BI__builtin_ia32_pshufhw256:
  case X86::BI__builtin_ia32_pshufhw512:
  case X86::BI__builtin_ia32_pshuflw:
  case X86::BI__builtin_ia32_pshuflw256:
  case X86::BI__builtin_ia32_pshuflw512:
  case X86::BI__builtin_ia32_vcvtps2ph:
  case X86::BI__builtin_ia32_vcvtps2ph_mask:
  case X86::BI__builtin_ia32_vcvtps2ph256:
  case X86::BI__builtin_ia32_vcvtps2ph256_mask:
  case X86::BI__builtin_ia32_vcvtps2ph512_mask:
  case X86::BI__builtin_ia32_rndscaleps_128_mask:
  case X86::BI__builtin_ia32_rndscalepd_128_mask:
  case X86::BI__builtin_ia32_rndscaleps_256_mask:
  case X86::BI__builtin_ia32_rndscalepd_256_mask:
  case X86::BI__builtin_ia32_rndscaleps_mask:
  case X86::BI__builtin_ia32_rndscalepd_mask:
  case X86::BI__builtin_ia32_reducepd128_mask:
  case X86::BI__builtin_ia32_reducepd256_mask:
  case X86::BI__builtin_ia32_reducepd512_mask:
  case X86::BI__builtin_ia32_reduceps128_mask:
  case X86::BI__builtin_ia32_reduceps256_mask:
  case X86::BI__builtin_ia32_reduceps512_mask:
  case X86::BI__builtin_ia32_prold512:
  case X86::BI__builtin_ia32_prolq512:
  case X86::BI__builtin_ia32_prold128:
  case X86::BI__builtin_ia32_prold256:
  case X86::BI__builtin_ia32_prolq128:
  case X86::BI__builtin_ia32_prolq256:
  case X86::BI__builtin_ia32_prord512:
  case X86::BI__builtin_ia32_prorq512:
  case X86::BI__builtin_ia32_prord128:
  case X86::BI__builtin_ia32_prord256:
  case X86::BI__builtin_ia32_prorq128:
  case X86::BI__builtin_ia32_prorq256:
  case X86::BI__builtin_ia32_fpclasspd128_mask:
  case X86::BI__builtin_ia32_fpclasspd256_mask:
  case X86::BI__builtin_ia32_fpclassps128_mask:
  case X86::BI__builtin_ia32_fpclassps256_mask:
  case X86::BI__builtin_ia32_fpclassps512_mask:
  case X86::BI__builtin_ia32_fpclasspd512_mask:
  case X86::BI__builtin_ia32_fpclasssd_mask:
  case X86::BI__builtin_ia32_fpclassss_mask:
  case X86::BI__builtin_ia32_pslldqi128_byteshift:
  case X86::BI__builtin_ia32_pslldqi256_byteshift:
  case X86::BI__builtin_ia32_pslldqi512_byteshift:
  case X86::BI__builtin_ia32_psrldqi128_byteshift:
  case X86::BI__builtin_ia32_psrldqi256_byteshift:
  case X86::BI__builtin_ia32_psrldqi512_byteshift:
  case X86::BI__builtin_ia32_kshiftliqi:
  case X86::BI__builtin_ia32_kshiftlihi:
  case X86::BI__builtin_ia32_kshiftlisi:
  case X86::BI__builtin_ia32_kshiftlidi:
  case X86::BI__builtin_ia32_kshiftriqi:
  case X86::BI__builtin_ia32_kshiftrihi:
  case X86::BI__builtin_ia32_kshiftrisi:
  case X86::BI__builtin_ia32_kshiftridi:
    i = 1; l = 0; u = 255;
    break;
  case X86::BI__builtin_ia32_vperm2f128_pd256:
  case X86::BI__builtin_ia32_vperm2f128_ps256:
  case X86::BI__builtin_ia32_vperm2f128_si256:
  case X86::BI__builtin_ia32_permti256:
  case X86::BI__builtin_ia32_pblendw128:
  case X86::BI__builtin_ia32_pblendw256:
  case X86::BI__builtin_ia32_blendps256:
  case X86::BI__builtin_ia32_pblendd256:
  case X86::BI__builtin_ia32_palignr128:
  case X86::BI__builtin_ia32_palignr256:
  case X86::BI__builtin_ia32_palignr512:
  case X86::BI__builtin_ia32_alignq512:
  case X86::BI__builtin_ia32_alignd512:
  case X86::BI__builtin_ia32_alignd128:
  case X86::BI__builtin_ia32_alignd256:
  case X86::BI__builtin_ia32_alignq128:
  case X86::BI__builtin_ia32_alignq256:
  case X86::BI__builtin_ia32_vcomisd:
  case X86::BI__builtin_ia32_vcomiss:
  case X86::BI__builtin_ia32_shuf_f32x4:
  case X86::BI__builtin_ia32_shuf_f64x2:
  case X86::BI__builtin_ia32_shuf_i32x4:
  case X86::BI__builtin_ia32_shuf_i64x2:
  case X86::BI__builtin_ia32_shufpd512:
  case X86::BI__builtin_ia32_shufps:
  case X86::BI__builtin_ia32_shufps256:
  case X86::BI__builtin_ia32_shufps512:
  case X86::BI__builtin_ia32_dbpsadbw128:
  case X86::BI__builtin_ia32_dbpsadbw256:
  case X86::BI__builtin_ia32_dbpsadbw512:
  case X86::BI__builtin_ia32_vpshldd128:
  case X86::BI__builtin_ia32_vpshldd256:
  case X86::BI__builtin_ia32_vpshldd512:
  case X86::BI__builtin_ia32_vpshldq128:
  case X86::BI__builtin_ia32_vpshldq256:
  case X86::BI__builtin_ia32_vpshldq512:
  case X86::BI__builtin_ia32_vpshldw128:
  case X86::BI__builtin_ia32_vpshldw256:
  case X86::BI__builtin_ia32_vpshldw512:
  case X86::BI__builtin_ia32_vpshrdd128:
  case X86::BI__builtin_ia32_vpshrdd256:
  case X86::BI__builtin_ia32_vpshrdd512:
  case X86::BI__builtin_ia32_vpshrdq128:
  case X86::BI__builtin_ia32_vpshrdq256:
  case X86::BI__builtin_ia32_vpshrdq512:
  case X86::BI__builtin_ia32_vpshrdw128:
  case X86::BI__builtin_ia32_vpshrdw256:
  case X86::BI__builtin_ia32_vpshrdw512:
    i = 2; l = 0; u = 255;
    break;
  case X86::BI__builtin_ia32_fixupimmpd512_mask:
  case X86::BI__builtin_ia32_fixupimmpd512_maskz:
  case X86::BI__builtin_ia32_fixupimmps512_mask:
  case X86::BI__builtin_ia32_fixupimmps512_maskz:
  case X86::BI__builtin_ia32_fixupimmsd_mask:
  case X86::BI__builtin_ia32_fixupimmsd_maskz:
  case X86::BI__builtin_ia32_fixupimmss_mask:
  case X86::BI__builtin_ia32_fixupimmss_maskz:
  case X86::BI__builtin_ia32_fixupimmpd128_mask:
  case X86::BI__builtin_ia32_fixupimmpd128_maskz:
  case X86::BI__builtin_ia32_fixupimmpd256_mask:
  case X86::BI__builtin_ia32_fixupimmpd256_maskz:
  case X86::BI__builtin_ia32_fixupimmps128_mask:
  case X86::BI__builtin_ia32_fixupimmps128_maskz:
  case X86::BI__builtin_ia32_fixupimmps256_mask:
  case X86::BI__builtin_ia32_fixupimmps256_maskz:
  case X86::BI__builtin_ia32_pternlogd512_mask:
  case X86::BI__builtin_ia32_pternlogd512_maskz:
  case X86::BI__builtin_ia32_pternlogq512_mask:
  case X86::BI__builtin_ia32_pternlogq512_maskz:
  case X86::BI__builtin_ia32_pternlogd128_mask:
  case X86::BI__builtin_ia32_pternlogd128_maskz:
  case X86::BI__builtin_ia32_pternlogd256_mask:
  case X86::BI__builtin_ia32_pternlogd256_maskz:
  case X86::BI__builtin_ia32_pternlogq128_mask:
  case X86::BI__builtin_ia32_pternlogq128_maskz:
  case X86::BI__builtin_ia32_pternlogq256_mask:
  case X86::BI__builtin_ia32_pternlogq256_maskz:
    i = 3; l = 0; u = 255;
    break;
  case X86::BI__builtin_ia32_gatherpfdpd:
  case X86::BI__builtin_ia32_gatherpfdps:
  case X86::BI__builtin_ia32_gatherpfqpd:
  case X86::BI__builtin_ia32_gatherpfqps:
  case X86::BI__builtin_ia32_scatterpfdpd:
  case X86::BI__builtin_ia32_scatterpfdps:
  case X86::BI__builtin_ia32_scatterpfqpd:
  case X86::BI__builtin_ia32_scatterpfqps:
    i = 4; l = 2; u = 3;
    break;
  case X86::BI__builtin_ia32_rndscalesd_round_mask:
  case X86::BI__builtin_ia32_rndscaless_round_mask:
    i = 4; l = 0; u = 255;
    break;
  }

  // Note that we don't force a hard error on the range check here, allowing
  // template-generated or macro-generated dead code to potentially have out-of-
  // range values. These need to code generate, but don't need to necessarily
  // make any sense. We use a warning that defaults to an error.
  return SemaBuiltinConstantArgRange(TheCall, i, l, u, /*RangeIsError*/ false);
}
4126
4127/// Given a FunctionDecl's FormatAttr, attempts to populate the FomatStringInfo
4128/// parameter with the FormatAttr's correct format_idx and firstDataArg.
4129/// Returns true when the format fits the function and the FormatStringInfo has
4130/// been populated.
4131bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
4132 FormatStringInfo *FSI) {
4133 FSI->HasVAListArg = Format->getFirstArg() == 0;
4134 FSI->FormatIdx = Format->getFormatIdx() - 1;
4135 FSI->FirstDataArg = FSI->HasVAListArg ? 0 : Format->getFirstArg() - 1;
4136
4137 // The way the format attribute works in GCC, the implicit this argument
4138 // of member functions is counted. However, it doesn't appear in our own
4139 // lists, so decrement format_idx in that case.
4140 if (IsCXXMember) {
4141 if(FSI->FormatIdx == 0)
4142 return false;
4143 --FSI->FormatIdx;
4144 if (FSI->FirstDataArg != 0)
4145 --FSI->FirstDataArg;
4146 }
4147 return true;
4148}
4149
4150/// Checks if a the given expression evaluates to null.
4151///
4152/// Returns true if the value evaluates to null.
4153static bool CheckNonNullExpr(Sema &S, const Expr *Expr) {
4154 // If the expression has non-null type, it doesn't evaluate to null.
4155 if (auto nullability
4156 = Expr->IgnoreImplicit()->getType()->getNullability(S.Context)) {
4157 if (*nullability == NullabilityKind::NonNull)
4158 return false;
4159 }
4160
4161 // As a special case, transparent unions initialized with zero are
4162 // considered null for the purposes of the nonnull attribute.
4163 if (const RecordType *UT = Expr->getType()->getAsUnionType()) {
4164 if (UT->getDecl()->hasAttr<TransparentUnionAttr>())
4165 if (const CompoundLiteralExpr *CLE =
4166 dyn_cast<CompoundLiteralExpr>(Expr))
4167 if (const InitListExpr *ILE =
4168 dyn_cast<InitListExpr>(CLE->getInitializer()))
4169 Expr = ILE->getInit(0);
4170 }
4171
4172 bool Result;
4173 return (!Expr->isValueDependent() &&
4174 Expr->EvaluateAsBooleanCondition(Result, S.Context) &&
4175 !Result);
4176}
4177
4178static void CheckNonNullArgument(Sema &S,
4179 const Expr *ArgExpr,
4180 SourceLocation CallSiteLoc) {
4181 if (CheckNonNullExpr(S, ArgExpr))
4182 S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr,
4183 S.PDiag(diag::warn_null_arg) << ArgExpr->getSourceRange());
4184}
4185
4186bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) {
4187 FormatStringInfo FSI;
4188 if ((GetFormatStringType(Format) == FST_NSString) &&
4189 getFormatStringInfo(Format, false, &FSI)) {
4190 Idx = FSI.FormatIdx;
4191 return true;
4192 }
4193 return false;
4194}
4195
4196/// Diagnose use of %s directive in an NSString which is being passed
4197/// as formatting string to formatting method.
4198static void
4199DiagnoseCStringFormatDirectiveInCFAPI(Sema &S,
4200 const NamedDecl *FDecl,
4201 Expr **Args,
4202 unsigned NumArgs) {
4203 unsigned Idx = 0;
4204 bool Format = false;
4205 ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily();
4206 if (SFFamily == ObjCStringFormatFamily::SFF_CFString) {
4207 Idx = 2;
4208 Format = true;
4209 }
4210 else
4211 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
4212 if (S.GetFormatNSStringIdx(I, Idx)) {
4213 Format = true;
4214 break;
4215 }
4216 }
4217 if (!Format || NumArgs <= Idx)
4218 return;
4219 const Expr *FormatExpr = Args[Idx];
4220 if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr))
4221 FormatExpr = CSCE->getSubExpr();
4222 const StringLiteral *FormatString;
4223 if (const ObjCStringLiteral *OSL =
4224 dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts()))
4225 FormatString = OSL->getString();
4226 else
4227 FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts());
4228 if (!FormatString)
4229 return;
4230 if (S.FormatStringHasSArg(FormatString)) {
4231 S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string)
4232 << "%s" << 1 << 1;
4233 S.Diag(FDecl->getLocation(), diag::note_entity_declared_at)
4234 << FDecl->getDeclName();
4235 }
4236}
4237
4238/// Determine whether the given type has a non-null nullability annotation.
4239static bool isNonNullType(ASTContext &ctx, QualType type) {
4240 if (auto nullability = type->getNullability(ctx))
4241 return *nullability == NullabilityKind::NonNull;
4242
4243 return false;
4244}
4245
/// Warn about null values passed for arguments that are required to be
/// non-null, gathering that requirement from (in order): nonnull attributes
/// on the callee, nonnull attributes / _Nonnull annotations on individual
/// parameters, and _Nonnull annotations on the prototype's parameter types.
static void CheckNonNullArguments(Sema &S,
                                  const NamedDecl *FDecl,
                                  const FunctionProtoType *Proto,
                                  ArrayRef<const Expr *> Args,
                                  SourceLocation CallSiteLoc) {
  assert((FDecl || Proto) && "Need a function declaration or prototype");

  // Check the attributes attached to the method/function itself.
  llvm::SmallBitVector NonNullArgs;
  if (FDecl) {
    // Handle the nonnull attribute on the function/method declaration itself.
    for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) {
      if (!NonNull->args_size()) {
        // Easy case: all pointer arguments are nonnull.
        for (const auto *Arg : Args)
          if (S.isValidPointerAttrType(Arg->getType()))
            CheckNonNullArgument(S, Arg, CallSiteLoc);
        return;
      }

      // The attribute names specific parameters; record them in the
      // bitvector, skipping indices beyond the actual argument count.
      // The bitvector is allocated lazily on the first hit.
      for (const ParamIdx &Idx : NonNull->args()) {
        unsigned IdxAST = Idx.getASTIndex();
        if (IdxAST >= Args.size())
          continue;
        if (NonNullArgs.empty())
          NonNullArgs.resize(Args.size());
        NonNullArgs.set(IdxAST);
      }
    }
  }

  if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) {
    // Handle the nonnull attribute on the parameters of the
    // function/method.
    ArrayRef<ParmVarDecl*> parms;
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl))
      parms = FD->parameters();
    else
      parms = cast<ObjCMethodDecl>(FDecl)->parameters();

    // A parameter requires a non-null argument if it carries an explicit
    // nonnull attribute or a _Nonnull type annotation.
    unsigned ParamIndex = 0;
    for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end();
         I != E; ++I, ++ParamIndex) {
      const ParmVarDecl *PVD = *I;
      if (PVD->hasAttr<NonNullAttr>() ||
          isNonNullType(S.Context, PVD->getType())) {
        if (NonNullArgs.empty())
          NonNullArgs.resize(Args.size());

        NonNullArgs.set(ParamIndex);
      }
    }
  } else {
    // If we have a non-function, non-method declaration but no
    // function prototype, try to dig out the function prototype.
    if (!Proto) {
      if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) {
        QualType type = VD->getType().getNonReferenceType();
        if (auto pointerType = type->getAs<PointerType>())
          type = pointerType->getPointeeType();
        else if (auto blockType = type->getAs<BlockPointerType>())
          type = blockType->getPointeeType();
        // FIXME: data member pointers?

        // Dig out the function prototype, if there is one.
        Proto = type->getAs<FunctionProtoType>();
      }
    }

    // Fill in non-null argument information from the nullability
    // information on the parameter types (if we have them).
    if (Proto) {
      unsigned Index = 0;
      for (auto paramType : Proto->getParamTypes()) {
        if (isNonNullType(S.Context, paramType)) {
          if (NonNullArgs.empty())
            NonNullArgs.resize(Args.size());

          NonNullArgs.set(Index);
        }

        ++Index;
      }
    }
  }

  // Check for non-null arguments.
  for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size();
       ArgIndex != ArgIndexEnd; ++ArgIndex) {
    if (NonNullArgs[ArgIndex])
      CheckNonNullArgument(S, Args[ArgIndex], CallSiteLoc);
  }
}
4339
/// Handles the checks for format strings, non-POD arguments to vararg
/// functions, NULL arguments passed to non-NULL parameters, and diagnose_if
/// attributes.
///
/// Diagnostics are emitted directly; nothing is returned. \p FDecl and
/// \p Proto may each be null, but at least one is expected for the
/// non-null-argument checks to run.
void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
                     const Expr *ThisArg, ArrayRef<const Expr *> Args,
                     bool IsMemberFunction, SourceLocation Loc,
                     SourceRange Range, VariadicCallType CallType) {
  // FIXME: We should check as much as we can in the template definition.
  if (CurContext->isDependentContext())
    return;

  // Printf and scanf checking.
  llvm::SmallBitVector CheckedVarArgs;
  if (FDecl) {
    for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
      // Only create vector if there are format attributes.
      CheckedVarArgs.resize(Args.size());

      // CheckFormatArguments marks the variadic arguments it consumed in
      // CheckedVarArgs so they are skipped by the POD check below.
      CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range,
                           CheckedVarArgs);
    }
  }

  // Refuse POD arguments that weren't caught by the format string
  // checks above.
  auto *FD = dyn_cast_or_null<FunctionDecl>(FDecl);
  if (CallType != VariadicDoesNotApply &&
      (!FD || FD->getBuiltinID() != Builtin::BI__noop)) {
    // Work out how many fixed parameters the callee declares; everything
    // after them is a variadic argument. Prefer the prototype, then the
    // declaration (function or ObjC method), defaulting to zero.
    unsigned NumParams = Proto ? Proto->getNumParams()
                       : FDecl && isa<FunctionDecl>(FDecl)
                           ? cast<FunctionDecl>(FDecl)->getNumParams()
                       : FDecl && isa<ObjCMethodDecl>(FDecl)
                           ? cast<ObjCMethodDecl>(FDecl)->param_size()
                       : 0;

    for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) {
      // Args[ArgIdx] can be null in malformed code.
      if (const Expr *Arg = Args[ArgIdx]) {
        // Skip arguments already validated by format-string checking.
        if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx])
          checkVariadicArgument(Arg, CallType);
      }
    }
  }

  if (FDecl || Proto) {
    CheckNonNullArguments(*this, FDecl, Proto, Args, Loc);

    // Type safety checking.
    if (FDecl) {
      for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>())
        CheckArgumentWithTypeTag(I, Args, Loc);
    }
  }

  if (FD)
    diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc);
}
4397
4398/// CheckConstructorCall - Check a constructor call for correctness and safety
4399/// properties not enforced by the C type system.
4400void Sema::CheckConstructorCall(FunctionDecl *FDecl,
4401 ArrayRef<const Expr *> Args,
4402 const FunctionProtoType *Proto,
4403 SourceLocation Loc) {
4404 VariadicCallType CallType =
4405 Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply;
4406 checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true,
4407 Loc, SourceRange(), CallType);
4408}
4409
/// CheckFunctionCall - Check a direct function call for various correctness
/// and safety properties not strictly enforced by the C type system.
///
/// Always returns false (diagnostics are emitted directly).
bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
                             const FunctionProtoType *Proto) {
  bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) &&
                              isa<CXXMethodDecl>(FDecl);
  bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) ||
                          IsMemberOperatorCall;
  VariadicCallType CallType = getVariadicCallType(FDecl, Proto,
                                                  TheCall->getCallee());
  Expr** Args = TheCall->getArgs();
  unsigned NumArgs = TheCall->getNumArgs();

  Expr *ImplicitThis = nullptr;
  if (IsMemberOperatorCall) {
    // If this is a call to a member operator, hide the first argument
    // from checkCall.
    // FIXME: Our choice of AST representation here is less than ideal.
    // Note: Args/NumArgs are adjusted so the remaining checks below also
    // see only the explicit arguments.
    ImplicitThis = Args[0];
    ++Args;
    --NumArgs;
  } else if (IsMemberFunction)
    ImplicitThis =
        cast<CXXMemberCallExpr>(TheCall)->getImplicitObjectArgument();

  checkCall(FDecl, Proto, ImplicitThis, llvm::makeArrayRef(Args, NumArgs),
            IsMemberFunction, TheCall->getRParenLoc(),
            TheCall->getCallee()->getSourceRange(), CallType);

  IdentifierInfo *FnInfo = FDecl->getIdentifier();
  // None of the checks below are needed for functions that don't have
  // simple names (e.g., C++ conversion functions).
  if (!FnInfo)
    return false;

  CheckAbsoluteValueFunction(TheCall, FDecl);
  CheckMaxUnsignedZero(TheCall, FDecl);

  if (getLangOpts().ObjC)
    DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs);

  // A kind of 0 means the callee is not a recognized memory function.
  unsigned CMId = FDecl->getMemoryFunctionKind();
  if (CMId == 0)
    return false;

  // Handle memory setting and copying functions.
  if (CMId == Builtin::BIstrlcpy || CMId == Builtin::BIstrlcat)
    CheckStrlcpycatArguments(TheCall, FnInfo);
  else if (CMId == Builtin::BIstrncat)
    CheckStrncatArguments(TheCall, FnInfo);
  else
    CheckMemaccessArguments(TheCall, CMId, FnInfo);

  return false;
}
4465
4466bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac,
4467 ArrayRef<const Expr *> Args) {
4468 VariadicCallType CallType =
4469 Method->isVariadic() ? VariadicMethod : VariadicDoesNotApply;
4470
4471 checkCall(Method, nullptr, /*ThisArg=*/nullptr, Args,
4472 /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(),
4473 CallType);
4474
4475 return false;
4476}
4477
4478bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
4479 const FunctionProtoType *Proto) {
4480 QualType Ty;
4481 if (const auto *V = dyn_cast<VarDecl>(NDecl))
4482 Ty = V->getType().getNonReferenceType();
4483 else if (const auto *F = dyn_cast<FieldDecl>(NDecl))
4484 Ty = F->getType().getNonReferenceType();
4485 else
4486 return false;
4487
4488 if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() &&
4489 !Ty->isFunctionProtoType())
4490 return false;
4491
4492 VariadicCallType CallType;
4493 if (!Proto || !Proto->isVariadic()) {
4494 CallType = VariadicDoesNotApply;
4495 } else if (Ty->isBlockPointerType()) {
4496 CallType = VariadicBlock;
4497 } else { // Ty->isFunctionPointerType()
4498 CallType = VariadicFunction;
4499 }
4500
4501 checkCall(NDecl, Proto, /*ThisArg=*/nullptr,
4502 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
4503 /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
4504 TheCall->getCallee()->getSourceRange(), CallType);
4505
4506 return false;
4507}
4508
4509/// Checks function calls when a FunctionDecl or a NamedDecl is not available,
4510/// such as function pointers returned from functions.
4511bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) {
4512 VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto,
4513 TheCall->getCallee());
4514 checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr,
4515 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
4516 /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
4517 TheCall->getCallee()->getSourceRange(), CallType);
4518
4519 return false;
4520}
4521
4522static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) {
4523 if (!llvm::isValidAtomicOrderingCABI(Ordering))
4524 return false;
4525
4526 auto OrderingCABI = (llvm::AtomicOrderingCABI)Ordering;
4527 switch (Op) {
4528 case AtomicExpr::AO__c11_atomic_init:
4529 case AtomicExpr::AO__opencl_atomic_init:
4530 llvm_unreachable("There is no ordering argument for an init");
4531
4532 case AtomicExpr::AO__c11_atomic_load:
4533 case AtomicExpr::AO__opencl_atomic_load:
4534 case AtomicExpr::AO__atomic_load_n:
4535 case AtomicExpr::AO__atomic_load:
4536 return OrderingCABI != llvm::AtomicOrderingCABI::release &&
4537 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;
4538
4539 case AtomicExpr::AO__c11_atomic_store:
4540 case AtomicExpr::AO__opencl_atomic_store:
4541 case AtomicExpr::AO__atomic_store:
4542 case AtomicExpr::AO__atomic_store_n:
4543 return OrderingCABI != llvm::AtomicOrderingCABI::consume &&
4544 OrderingCABI != llvm::AtomicOrderingCABI::acquire &&
4545 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;
4546
4547 default:
4548 return true;
4549 }
4550}
4551
/// Perform semantic analysis for a call to one of the C11 / GNU / OpenCL
/// atomic builtins: classify the builtin into an argument "form", verify
/// the argument count and types, coerce each argument, and rewrite the
/// CallExpr into an AtomicExpr with operands permuted into a consistent
/// order. Returns the new AtomicExpr, or an invalid result on error.
ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
                                         AtomicExpr::AtomicOp Op) {
  CallExpr *TheCall = cast<CallExpr>(TheCallResult.get());
  // The callee reference is used only as a diagnostic location below.
  DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());

  // All the non-OpenCL operations take one of the following forms.
  // The OpenCL operations take the __c11 forms with one extra argument for
  // synchronization scope.
  enum {
    // C    __c11_atomic_init(A *, C)
    Init,

    // C    __c11_atomic_load(A *, int)
    Load,

    // void __atomic_load(A *, CP, int)
    LoadCopy,

    // void __atomic_store(A *, CP, int)
    Copy,

    // C    __c11_atomic_add(A *, M, int)
    Arithmetic,

    // C    __atomic_exchange_n(A *, CP, int)
    Xchg,

    // void __atomic_exchange(A *, C *, CP, int)
    GNUXchg,

    // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int)
    C11CmpXchg,

    // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int)
    GNUCmpXchg
  } Form = Init;

  // Per-form argument counts and value-operand counts, indexed by the enum
  // above.
  const unsigned NumForm = GNUCmpXchg + 1;
  const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 };
  const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 };
  // where:
  //   C is an appropriate type,
  //   A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins,
  //   CP is C for __c11 builtins and GNU _n builtins and is C * otherwise,
  //   M is C if C is an integer, and ptrdiff_t if C is a pointer, and
  //   the int parameters are for orderings.

  static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm
      && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm,
    "need to update code for modified forms");
  static_assert(AtomicExpr::AO__c11_atomic_init == 0 &&
                    AtomicExpr::AO__c11_atomic_fetch_xor + 1 ==
                        AtomicExpr::AO__atomic_load,
                "need to update code for modified C11 atomics");
  // OpenCL builtins follow the C11 rules (and add a scope argument);
  // IsC11 therefore includes IsOpenCL.
  bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init &&
                  Op <= AtomicExpr::AO__opencl_atomic_fetch_max;
  bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init &&
               Op <= AtomicExpr::AO__c11_atomic_fetch_xor) ||
               IsOpenCL;
  bool IsN = Op == AtomicExpr::AO__atomic_load_n ||
             Op == AtomicExpr::AO__atomic_store_n ||
             Op == AtomicExpr::AO__atomic_exchange_n ||
             Op == AtomicExpr::AO__atomic_compare_exchange_n;
  bool IsAddSub = false;
  bool IsMinMax = false;

  // Classify the operation into one of the argument forms above.
  switch (Op) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    Form = Init;
    break;

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    Form = Load;
    break;

  case AtomicExpr::AO__atomic_load:
    Form = LoadCopy;
    break;

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n:
    Form = Copy;
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
    IsAddSub = true;
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Form = Arithmetic;
    break;

  case AtomicExpr::AO__atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_max:
    IsMinMax = true;
    Form = Arithmetic;
    break;

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
    Form = Xchg;
    break;

  case AtomicExpr::AO__atomic_exchange:
    Form = GNUXchg;
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
    Form = C11CmpXchg;
    break;

  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
    Form = GNUCmpXchg;
    break;
  }

  // OpenCL operations (other than init) take one extra argument for the
  // synchronization scope.
  unsigned AdjustedNumArgs = NumArgs[Form];
  if (IsOpenCL && Op != AtomicExpr::AO__opencl_atomic_init)
    ++AdjustedNumArgs;
  // Check we have the right number of arguments.
  if (TheCall->getNumArgs() < AdjustedNumArgs) {
    Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args)
        << 0 << AdjustedNumArgs << TheCall->getNumArgs()
        << TheCall->getCallee()->getSourceRange();
    return ExprError();
  } else if (TheCall->getNumArgs() > AdjustedNumArgs) {
    Diag(TheCall->getArg(AdjustedNumArgs)->getBeginLoc(),
         diag::err_typecheck_call_too_many_args)
        << 0 << AdjustedNumArgs << TheCall->getNumArgs()
        << TheCall->getCallee()->getSourceRange();
    return ExprError();
  }

  // Inspect the first argument of the atomic operation.
  Expr *Ptr = TheCall->getArg(0);
  ExprResult ConvertedPtr = DefaultFunctionArrayLvalueConversion(Ptr);
  if (ConvertedPtr.isInvalid())
    return ExprError();

  Ptr = ConvertedPtr.get();
  const PointerType *pointerType = Ptr->getType()->getAs<PointerType>();
  if (!pointerType) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
        << Ptr->getType() << Ptr->getSourceRange();
    return ExprError();
  }

  // For a __c11 builtin, this should be a pointer to an _Atomic type.
  QualType AtomTy = pointerType->getPointeeType(); // 'A'
  QualType ValType = AtomTy; // 'C'
  if (IsC11) {
    if (!AtomTy->isAtomicType()) {
      Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_atomic)
          << Ptr->getType() << Ptr->getSourceRange();
      return ExprError();
    }
    // Writes through a const (or OpenCL constant-address-space) atomic are
    // rejected; loads are exempt from the const check.
    if ((Form != Load && Form != LoadCopy && AtomTy.isConstQualified()) ||
        AtomTy.getAddressSpace() == LangAS::opencl_constant) {
      Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_non_const_atomic)
          << (AtomTy.isConstQualified() ? 0 : 1) << Ptr->getType()
          << Ptr->getSourceRange();
      return ExprError();
    }
    ValType = AtomTy->getAs<AtomicType>()->getValueType();
  } else if (Form != Load && Form != LoadCopy) {
    if (ValType.isConstQualified()) {
      Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_non_const_pointer)
          << Ptr->getType() << Ptr->getSourceRange();
      return ExprError();
    }
  }

  // For an arithmetic operation, the implied arithmetic must be well-formed.
  if (Form == Arithmetic) {
    // gcc does not enforce these rules for GNU atomics, but we do so for sanity.
    if (IsAddSub && !ValType->isIntegerType()
        && !ValType->isPointerType()) {
      Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_atomic_int_or_ptr)
          << IsC11 << Ptr->getType() << Ptr->getSourceRange();
      return ExprError();
    }
    // Min/max are restricted to (signed or unsigned) 'int' here.
    if (IsMinMax) {
      const BuiltinType *BT = ValType->getAs<BuiltinType>();
      if (!BT || (BT->getKind() != BuiltinType::Int &&
                  BT->getKind() != BuiltinType::UInt)) {
        Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_int32_or_ptr);
        return ExprError();
      }
    }
    // Bitwise operations (and/or/xor/nand) require an integer operand.
    if (!IsAddSub && !IsMinMax && !ValType->isIntegerType()) {
      Diag(DRE->getBeginLoc(), diag::err_atomic_op_bitwise_needs_atomic_int)
          << IsC11 << Ptr->getType() << Ptr->getSourceRange();
      return ExprError();
    }
    // Pointer arithmetic needs a complete pointee to compute the stride.
    if (IsC11 && ValType->isPointerType() &&
        RequireCompleteType(Ptr->getBeginLoc(), ValType->getPointeeType(),
                            diag::err_incomplete_type)) {
      return ExprError();
    }
  } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) {
    // For __atomic_*_n operations, the value type must be a scalar integral or
    // pointer type which is 1, 2, 4, 8 or 16 bytes in length.
    Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_atomic_int_or_ptr)
        << IsC11 << Ptr->getType() << Ptr->getSourceRange();
    return ExprError();
  }

  if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) &&
      !AtomTy->isScalarType()) {
    // For GNU atomics, require a trivially-copyable type. This is not part of
    // the GNU atomics specification, but we enforce it for sanity.
    Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_trivial_copy)
        << Ptr->getType() << Ptr->getSourceRange();
    return ExprError();
  }

  switch (ValType.getObjCLifetime()) {
  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
    // okay
    break;

  case Qualifiers::OCL_Weak:
  case Qualifiers::OCL_Strong:
  case Qualifiers::OCL_Autoreleasing:
    // FIXME: Can this happen? By this point, ValType should be known
    // to be trivially copyable.
    Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
        << ValType << Ptr->getSourceRange();
    return ExprError();
  }

  // All atomic operations have an overload which takes a pointer to a volatile
  // 'A'. We shouldn't let the volatile-ness of the pointee-type inject itself
  // into the result or the other operands. Similarly atomic_load takes a
  // pointer to a const 'A'.
  ValType.removeLocalVolatile();
  ValType.removeLocalConst();
  QualType ResultType = ValType;
  if (Form == Copy || Form == LoadCopy || Form == GNUXchg ||
      Form == Init)
    ResultType = Context.VoidTy;
  else if (Form == C11CmpXchg || Form == GNUCmpXchg)
    ResultType = Context.BoolTy;

  // The type of a parameter passed 'by value'. In the GNU atomics, such
  // arguments are actually passed as pointers.
  QualType ByValType = ValType; // 'CP'
  bool IsPassedByAddress = false;
  if (!IsC11 && !IsN) {
    ByValType = Ptr->getType();
    IsPassedByAddress = true;
  }

  // The first argument's non-CV pointer type is used to deduce the type of
  // subsequent arguments, except for:
  //  - weak flag (always converted to bool)
  //  - memory order (always converted to int)
  //  - scope (always converted to int)
  for (unsigned i = 0; i != TheCall->getNumArgs(); ++i) {
    QualType Ty;
    if (i < NumVals[Form] + 1) {
      switch (i) {
      case 0:
        // The first argument is always a pointer. It has a fixed type.
        // It is always dereferenced, a nullptr is undefined.
        CheckNonNullArgument(*this, TheCall->getArg(i), DRE->getBeginLoc());
        // Nothing else to do: we already know all we want about this pointer.
        continue;
      case 1:
        // The second argument is the non-atomic operand. For arithmetic, this
        // is always passed by value, and for a compare_exchange it is always
        // passed by address. For the rest, GNU uses by-address and C11 uses
        // by-value.
        assert(Form != Load);
        if (Form == Init || (Form == Arithmetic && ValType->isIntegerType()))
          Ty = ValType;
        else if (Form == Copy || Form == Xchg) {
          if (IsPassedByAddress)
            // The value pointer is always dereferenced, a nullptr is undefined.
            CheckNonNullArgument(*this, TheCall->getArg(i), DRE->getBeginLoc());
          Ty = ByValType;
        } else if (Form == Arithmetic)
          Ty = Context.getPointerDiffType();
        else {
          Expr *ValArg = TheCall->getArg(i);
          // The value pointer is always dereferenced, a nullptr is undefined.
          CheckNonNullArgument(*this, ValArg, DRE->getBeginLoc());
          LangAS AS = LangAS::Default;
          // Keep address space of non-atomic pointer type.
          if (const PointerType *PtrTy =
                  ValArg->getType()->getAs<PointerType>()) {
            AS = PtrTy->getPointeeType().getAddressSpace();
          }
          Ty = Context.getPointerType(
              Context.getAddrSpaceQualType(ValType.getUnqualifiedType(), AS));
        }
        break;
      case 2:
        // The third argument to compare_exchange / GNU exchange is the desired
        // value, either by-value (for the C11 and *_n variant) or as a pointer.
        if (IsPassedByAddress)
          CheckNonNullArgument(*this, TheCall->getArg(i), DRE->getBeginLoc());
        Ty = ByValType;
        break;
      case 3:
        // The fourth argument to GNU compare_exchange is a 'weak' flag.
        Ty = Context.BoolTy;
        break;
      }
    } else {
      // The order(s) and scope are always converted to int.
      Ty = Context.IntTy;
    }

    // Coerce the argument to the computed parameter type in place.
    InitializedEntity Entity =
        InitializedEntity::InitializeParameter(Context, Ty, false);
    ExprResult Arg = TheCall->getArg(i);
    Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid())
      return true;
    TheCall->setArg(i, Arg.get());
  }

  // Permute the arguments into a 'consistent' order.
  SmallVector<Expr*, 5> SubExprs;
  SubExprs.push_back(Ptr);
  switch (Form) {
  case Init:
    // Note, AtomicExpr::getVal1() has a special case for this atomic.
    SubExprs.push_back(TheCall->getArg(1)); // Val1
    break;
  case Load:
    SubExprs.push_back(TheCall->getArg(1)); // Order
    break;
  case LoadCopy:
  case Copy:
  case Arithmetic:
  case Xchg:
    SubExprs.push_back(TheCall->getArg(2)); // Order
    SubExprs.push_back(TheCall->getArg(1)); // Val1
    break;
  case GNUXchg:
    // Note, AtomicExpr::getVal2() has a special case for this atomic.
    SubExprs.push_back(TheCall->getArg(3)); // Order
    SubExprs.push_back(TheCall->getArg(1)); // Val1
    SubExprs.push_back(TheCall->getArg(2)); // Val2
    break;
  case C11CmpXchg:
    SubExprs.push_back(TheCall->getArg(3)); // Order
    SubExprs.push_back(TheCall->getArg(1)); // Val1
    SubExprs.push_back(TheCall->getArg(4)); // OrderFail
    SubExprs.push_back(TheCall->getArg(2)); // Val2
    break;
  case GNUCmpXchg:
    SubExprs.push_back(TheCall->getArg(4)); // Order
    SubExprs.push_back(TheCall->getArg(1)); // Val1
    SubExprs.push_back(TheCall->getArg(5)); // OrderFail
    SubExprs.push_back(TheCall->getArg(2)); // Val2
    SubExprs.push_back(TheCall->getArg(3)); // Weak
    break;
  }

  // If the memory order is a compile-time constant, warn when it is not a
  // legal ordering for this operation (a warning, not an error, since the
  // value may be dead or intentionally dynamic).
  if (SubExprs.size() >= 2 && Form != Init) {
    llvm::APSInt Result(32);
    if (SubExprs[1]->isIntegerConstantExpr(Result, Context) &&
        !isValidOrderingForOp(Result.getSExtValue(), Op))
      Diag(SubExprs[1]->getBeginLoc(),
           diag::warn_atomic_op_has_invalid_memory_order)
          << SubExprs[1]->getSourceRange();
  }

  // For operations with a synchronization-scope model (OpenCL), validate a
  // constant scope argument and append it last.
  if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) {
    auto *Scope = TheCall->getArg(TheCall->getNumArgs() - 1);
    llvm::APSInt Result(32);
    if (Scope->isIntegerConstantExpr(Result, Context) &&
        !ScopeModel->isValid(Result.getZExtValue())) {
      Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope)
          << Scope->getSourceRange();
    }
    SubExprs.push_back(Scope);
  }

  AtomicExpr *AE =
      new (Context) AtomicExpr(TheCall->getCallee()->getBeginLoc(), SubExprs,
                               ResultType, Op, TheCall->getRParenLoc());

  // C11/OpenCL plain load/store must not require a library call that the
  // target cannot support.
  if ((Op == AtomicExpr::AO__c11_atomic_load ||
       Op == AtomicExpr::AO__c11_atomic_store ||
       Op == AtomicExpr::AO__opencl_atomic_load ||
       Op == AtomicExpr::AO__opencl_atomic_store ) &&
      Context.AtomicUsesUnsupportedLibcall(AE))
    Diag(AE->getBeginLoc(), diag::err_atomic_load_store_uses_lib)
        << ((Op == AtomicExpr::AO__c11_atomic_load ||
             Op == AtomicExpr::AO__opencl_atomic_load)
                ? 0
                : 1);

  return AE;
}
4983
4984/// checkBuiltinArgument - Given a call to a builtin function, perform
4985/// normal type-checking on the given argument, updating the call in
4986/// place. This is useful when a builtin function requires custom
4987/// type-checking for some of its arguments but not necessarily all of
4988/// them.
4989///
4990/// Returns true on error.
4991static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) {
4992 FunctionDecl *Fn = E->getDirectCallee();
4993 assert(Fn && "builtin call without direct callee!");
4994
4995 ParmVarDecl *Param = Fn->getParamDecl(ArgIndex);
4996 InitializedEntity Entity =
4997 InitializedEntity::InitializeParameter(S.Context, Param);
4998
4999 ExprResult Arg = E->getArg(0);
5000 Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
5001 if (Arg.isInvalid())
5002 return true;
5003
5004 E->setArg(ArgIndex, Arg.get());
5005 return false;
5006}
5007
/// We have a call to a function like __sync_fetch_and_add, which is an
/// overloaded function based on the pointer type of its first argument.
/// The main BuildCallExpr routines have already promoted the types of
/// arguments because all of these calls are prototyped as void(...).
///
/// This function goes through and does final semantic checking for these
/// builtins, as well as generating any warnings.
ExprResult
Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
  CallExpr *TheCall = static_cast<CallExpr *>(TheCallResult.get());
  Expr *Callee = TheCall->getCallee();
  DeclRefExpr *DRE = cast<DeclRefExpr>(Callee->IgnoreParenCasts());
  FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());

  // Ensure that we have at least one argument to do type inference from.
  if (TheCall->getNumArgs() < 1) {
    Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 1 << TheCall->getNumArgs() << Callee->getSourceRange();
    return ExprError();
  }

  // Inspect the first argument of the atomic builtin.  This should always be
  // a pointer type, whose element is an integral scalar or pointer type.
  // Because it is a pointer type, we don't have to worry about any implicit
  // casts here.
  // FIXME: We don't allow floating point scalars as input.
  Expr *FirstArg = TheCall->getArg(0);
  ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg);
  if (FirstArgResult.isInvalid())
    return ExprError();
  FirstArg = FirstArgResult.get();
  TheCall->setArg(0, FirstArg);

  const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>();
  if (!pointerType) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
        << FirstArg->getType() << FirstArg->getSourceRange();
    return ExprError();
  }
  // XXXAR: disallow __sync builtins with capabilities for now.
  // It would result in incorrect code generation because we would end up
  // using the _16 versions and generating i256 in the IR.  Only the
  // compare-and-swap / swap forms are known to work on capabilities.
  bool IsCapabilityAtomicOp = false;
  if (pointerType->getPointeeType()->isCHERICapabilityType(Context)) {
    IsCapabilityAtomicOp = true;
    switch (FDecl->getBuiltinID()) {
    case Builtin::BI__sync_bool_compare_and_swap:
    case Builtin::BI__sync_val_compare_and_swap:
    case Builtin::BI__sync_swap:
      break;
    default:
      // All other builtins generate broken code for now
      Diag(DRE->getBeginLoc(), diag::err_sync_atomic_builtin_with_capability)
          << pointerType->getPointeeType() << FirstArg->getSourceRange();
      return ExprError();
    }
  }

  // The pointee must be an integer, pointer, or block pointer.
  QualType ValType = pointerType->getPointeeType();
  if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
      !ValType->isBlockPointerType()) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intptr)
        << FirstArg->getType() << FirstArg->getSourceRange();
    return ExprError();
  }

  // Atomics mutate the pointee, so a const pointee is an error.
  if (ValType.isConstQualified()) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_cannot_be_const)
        << FirstArg->getType() << FirstArg->getSourceRange();
    return ExprError();
  }

  // ARC-managed pointees cannot be updated with raw atomics.
  switch (ValType.getObjCLifetime()) {
  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
    // okay
    break;

  case Qualifiers::OCL_Weak:
  case Qualifiers::OCL_Strong:
  case Qualifiers::OCL_Autoreleasing:
    Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
        << ValType << FirstArg->getSourceRange();
    return ExprError();
  }

  // Strip any qualifiers off ValType.
  ValType = ValType.getUnqualifiedType();

  // The majority of builtins return a value, but a few have special return
  // types, so allow them to override appropriately below.
  QualType ResultType = ValType;

  // We need to figure out which concrete builtin this maps onto.  For example,
  // __sync_fetch_and_add with a 2 byte object turns into
  // __sync_fetch_and_add_2.
#define BUILTIN_ROW(x) \
  { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \
    Builtin::BI##x##_8, Builtin::BI##x##_16 }

  static const unsigned BuiltinIndices[][5] = {
    BUILTIN_ROW(__sync_fetch_and_add),
    BUILTIN_ROW(__sync_fetch_and_sub),
    BUILTIN_ROW(__sync_fetch_and_or),
    BUILTIN_ROW(__sync_fetch_and_and),
    BUILTIN_ROW(__sync_fetch_and_xor),
    BUILTIN_ROW(__sync_fetch_and_nand),

    BUILTIN_ROW(__sync_add_and_fetch),
    BUILTIN_ROW(__sync_sub_and_fetch),
    BUILTIN_ROW(__sync_and_and_fetch),
    BUILTIN_ROW(__sync_or_and_fetch),
    BUILTIN_ROW(__sync_xor_and_fetch),
    BUILTIN_ROW(__sync_nand_and_fetch),

    BUILTIN_ROW(__sync_val_compare_and_swap),
    BUILTIN_ROW(__sync_bool_compare_and_swap),
    BUILTIN_ROW(__sync_lock_test_and_set),
    BUILTIN_ROW(__sync_lock_release),
    BUILTIN_ROW(__sync_swap)
  };
#undef BUILTIN_ROW

  // Determine the index of the size (the column of BuiltinIndices).
  unsigned SizeIndex;
  switch (Context.getTypeSizeInChars(ValType).getQuantity()) {
  case 1: SizeIndex = 0; break;
  case 2: SizeIndex = 1; break;
  case 4: SizeIndex = 2; break;
  case 8: SizeIndex = 3; break;
  case 16: SizeIndex = 4; break;
  default:
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_pointer_size)
        << FirstArg->getType() << FirstArg->getSourceRange();
    return ExprError();
  }

  // Each of these builtins has one pointer argument, followed by some number of
  // values (0, 1 or 2) followed by a potentially empty varags list of stuff
  // that we ignore.  Find out which row of BuiltinIndices to read from as well
  // as the number of fixed args.
  unsigned BuiltinID = FDecl->getBuiltinID();
  unsigned BuiltinIndex, NumFixed = 1;
  bool WarnAboutSemanticsChange = false;
  switch (BuiltinID) {
  default: llvm_unreachable("Unknown overloaded atomic builtin!");
  case Builtin::BI__sync_fetch_and_add:
  case Builtin::BI__sync_fetch_and_add_1:
  case Builtin::BI__sync_fetch_and_add_2:
  case Builtin::BI__sync_fetch_and_add_4:
  case Builtin::BI__sync_fetch_and_add_8:
  case Builtin::BI__sync_fetch_and_add_16:
    BuiltinIndex = 0;
    break;

  case Builtin::BI__sync_fetch_and_sub:
  case Builtin::BI__sync_fetch_and_sub_1:
  case Builtin::BI__sync_fetch_and_sub_2:
  case Builtin::BI__sync_fetch_and_sub_4:
  case Builtin::BI__sync_fetch_and_sub_8:
  case Builtin::BI__sync_fetch_and_sub_16:
    BuiltinIndex = 1;
    break;

  case Builtin::BI__sync_fetch_and_or:
  case Builtin::BI__sync_fetch_and_or_1:
  case Builtin::BI__sync_fetch_and_or_2:
  case Builtin::BI__sync_fetch_and_or_4:
  case Builtin::BI__sync_fetch_and_or_8:
  case Builtin::BI__sync_fetch_and_or_16:
    BuiltinIndex = 2;
    break;

  case Builtin::BI__sync_fetch_and_and:
  case Builtin::BI__sync_fetch_and_and_1:
  case Builtin::BI__sync_fetch_and_and_2:
  case Builtin::BI__sync_fetch_and_and_4:
  case Builtin::BI__sync_fetch_and_and_8:
  case Builtin::BI__sync_fetch_and_and_16:
    BuiltinIndex = 3;
    break;

  case Builtin::BI__sync_fetch_and_xor:
  case Builtin::BI__sync_fetch_and_xor_1:
  case Builtin::BI__sync_fetch_and_xor_2:
  case Builtin::BI__sync_fetch_and_xor_4:
  case Builtin::BI__sync_fetch_and_xor_8:
  case Builtin::BI__sync_fetch_and_xor_16:
    BuiltinIndex = 4;
    break;

  // GCC changed the meaning of __sync_fetch_and_nand in GCC 4.4; warn so
  // users are aware their code's semantics may differ across compilers.
  case Builtin::BI__sync_fetch_and_nand:
  case Builtin::BI__sync_fetch_and_nand_1:
  case Builtin::BI__sync_fetch_and_nand_2:
  case Builtin::BI__sync_fetch_and_nand_4:
  case Builtin::BI__sync_fetch_and_nand_8:
  case Builtin::BI__sync_fetch_and_nand_16:
    BuiltinIndex = 5;
    WarnAboutSemanticsChange = true;
    break;

  case Builtin::BI__sync_add_and_fetch:
  case Builtin::BI__sync_add_and_fetch_1:
  case Builtin::BI__sync_add_and_fetch_2:
  case Builtin::BI__sync_add_and_fetch_4:
  case Builtin::BI__sync_add_and_fetch_8:
  case Builtin::BI__sync_add_and_fetch_16:
    BuiltinIndex = 6;
    break;

  case Builtin::BI__sync_sub_and_fetch:
  case Builtin::BI__sync_sub_and_fetch_1:
  case Builtin::BI__sync_sub_and_fetch_2:
  case Builtin::BI__sync_sub_and_fetch_4:
  case Builtin::BI__sync_sub_and_fetch_8:
  case Builtin::BI__sync_sub_and_fetch_16:
    BuiltinIndex = 7;
    break;

  case Builtin::BI__sync_and_and_fetch:
  case Builtin::BI__sync_and_and_fetch_1:
  case Builtin::BI__sync_and_and_fetch_2:
  case Builtin::BI__sync_and_and_fetch_4:
  case Builtin::BI__sync_and_and_fetch_8:
  case Builtin::BI__sync_and_and_fetch_16:
    BuiltinIndex = 8;
    break;

  case Builtin::BI__sync_or_and_fetch:
  case Builtin::BI__sync_or_and_fetch_1:
  case Builtin::BI__sync_or_and_fetch_2:
  case Builtin::BI__sync_or_and_fetch_4:
  case Builtin::BI__sync_or_and_fetch_8:
  case Builtin::BI__sync_or_and_fetch_16:
    BuiltinIndex = 9;
    break;

  case Builtin::BI__sync_xor_and_fetch:
  case Builtin::BI__sync_xor_and_fetch_1:
  case Builtin::BI__sync_xor_and_fetch_2:
  case Builtin::BI__sync_xor_and_fetch_4:
  case Builtin::BI__sync_xor_and_fetch_8:
  case Builtin::BI__sync_xor_and_fetch_16:
    BuiltinIndex = 10;
    break;

  case Builtin::BI__sync_nand_and_fetch:
  case Builtin::BI__sync_nand_and_fetch_1:
  case Builtin::BI__sync_nand_and_fetch_2:
  case Builtin::BI__sync_nand_and_fetch_4:
  case Builtin::BI__sync_nand_and_fetch_8:
  case Builtin::BI__sync_nand_and_fetch_16:
    BuiltinIndex = 11;
    WarnAboutSemanticsChange = true;
    break;

  case Builtin::BI__sync_val_compare_and_swap:
  case Builtin::BI__sync_val_compare_and_swap_1:
  case Builtin::BI__sync_val_compare_and_swap_2:
  case Builtin::BI__sync_val_compare_and_swap_4:
  case Builtin::BI__sync_val_compare_and_swap_8:
  case Builtin::BI__sync_val_compare_and_swap_16:
    BuiltinIndex = 12;
    NumFixed = 2;
    break;

  case Builtin::BI__sync_bool_compare_and_swap:
  case Builtin::BI__sync_bool_compare_and_swap_1:
  case Builtin::BI__sync_bool_compare_and_swap_2:
  case Builtin::BI__sync_bool_compare_and_swap_4:
  case Builtin::BI__sync_bool_compare_and_swap_8:
  case Builtin::BI__sync_bool_compare_and_swap_16:
    BuiltinIndex = 13;
    NumFixed = 2;
    // Returns success/failure rather than the previous value.
    ResultType = Context.BoolTy;
    break;

  case Builtin::BI__sync_lock_test_and_set:
  case Builtin::BI__sync_lock_test_and_set_1:
  case Builtin::BI__sync_lock_test_and_set_2:
  case Builtin::BI__sync_lock_test_and_set_4:
  case Builtin::BI__sync_lock_test_and_set_8:
  case Builtin::BI__sync_lock_test_and_set_16:
    BuiltinIndex = 14;
    break;

  case Builtin::BI__sync_lock_release:
  case Builtin::BI__sync_lock_release_1:
  case Builtin::BI__sync_lock_release_2:
  case Builtin::BI__sync_lock_release_4:
  case Builtin::BI__sync_lock_release_8:
  case Builtin::BI__sync_lock_release_16:
    BuiltinIndex = 15;
    // Takes only the pointer and returns nothing.
    NumFixed = 0;
    ResultType = Context.VoidTy;
    break;

  case Builtin::BI__sync_swap:
  case Builtin::BI__sync_swap_1:
  case Builtin::BI__sync_swap_2:
  case Builtin::BI__sync_swap_4:
  case Builtin::BI__sync_swap_8:
  case Builtin::BI__sync_swap_16:
    BuiltinIndex = 16;
    break;
  }

  // Now that we know how many fixed arguments we expect, first check that we
  // have at least that many.
  if (TheCall->getNumArgs() < 1+NumFixed) {
    Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 1 + NumFixed << TheCall->getNumArgs()
        << Callee->getSourceRange();
    return ExprError();
  }

  // __sync_* builtins are always sequentially consistent; warn under
  // -Watomic-implicit-seq-cst so users can migrate to __atomic_*.
  Diag(TheCall->getEndLoc(), diag::warn_atomic_implicit_seq_cst)
      << Callee->getSourceRange();

  if (WarnAboutSemanticsChange) {
    Diag(TheCall->getEndLoc(), diag::warn_sync_fetch_and_nand_semantics_change)
        << Callee->getSourceRange();
  }

  // Get the decl for the concrete builtin from this, we can tell what the
  // concrete integer type we should convert to is.
  unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex];
  const char *NewBuiltinName = Context.BuiltinInfo.getName(NewBuiltinID);
  FunctionDecl *NewBuiltinDecl;
  if (NewBuiltinID == BuiltinID)
    NewBuiltinDecl = FDecl;
  else {
    // Perform builtin lookup to avoid redeclaring it.
    DeclarationName DN(&Context.Idents.get(NewBuiltinName));
    LookupResult Res(*this, DN, DRE->getBeginLoc(), LookupOrdinaryName);
    LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true);
    assert(Res.getFoundDecl());
    NewBuiltinDecl = dyn_cast<FunctionDecl>(Res.getFoundDecl());
    if (!NewBuiltinDecl)
      return ExprError();
  }

  // The first argument --- the pointer --- has a fixed type; we
  // deduce the types of the rest of the arguments accordingly.  Walk
  // the remaining arguments, converting them to the deduced value type.
  for (unsigned i = 0; i != NumFixed; ++i) {
    ExprResult Arg = TheCall->getArg(i+1);

    // GCC does an implicit conversion to the pointer or integer ValType.  This
    // can fail in some cases (1i -> int**), check for this error case now.
    // Initialize the argument.
    InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
                                                   ValType, /*consume*/ false);
    Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid())
      return ExprError();

    // Okay, we have something that *can* be converted to the right type.  Check
    // to see if there is a potentially weird extension going on here.  This can
    // happen when you do an atomic operation on something like an char* and
    // pass in 42.  The 42 gets converted to char.  This is even more strange
    // for things like 45.123 -> char, etc.
    // FIXME: Do this check.
    TheCall->setArg(i+1, Arg.get());
  }

  // Create a new DeclRefExpr to refer to the new decl.
  DeclRefExpr *NewDRE = DeclRefExpr::Create(
      Context, DRE->getQualifierLoc(), SourceLocation(), NewBuiltinDecl,
      /*enclosing*/ false, DRE->getLocation(), Context.BuiltinFnTy,
      DRE->getValueKind(), nullptr, nullptr, DRE->isNonOdrUse());

  // Set the callee in the CallExpr.
  // FIXME: This loses syntactic information.
  QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType());
  ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy,
                                              CK_BuiltinFnToFnPtr);
  TheCall->setCallee(PromotedCall.get());

  // Change the result type of the call to match the original value type.  This
  // is arbitrary, but the codegen for these builtins is designed to handle it
  // gracefully.
  TheCall->setType(ResultType);

  return TheCallResult;
}
5394
5395/// SemaBuiltinNontemporalOverloaded - We have a call to
5396/// __builtin_nontemporal_store or __builtin_nontemporal_load, which is an
5397/// overloaded function based on the pointer type of its last argument.
5398///
5399/// This function goes through and does final semantic checking for these
5400/// builtins.
5401ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) {
5402 CallExpr *TheCall = (CallExpr *)TheCallResult.get();
5403 DeclRefExpr *DRE =
5404 cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
5405 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
5406 unsigned BuiltinID = FDecl->getBuiltinID();
5407 assert((BuiltinID == Builtin::BI__builtin_nontemporal_store ||
5408 BuiltinID == Builtin::BI__builtin_nontemporal_load) &&
5409 "Unexpected nontemporal load/store builtin!");
5410 bool isStore = BuiltinID == Builtin::BI__builtin_nontemporal_store;
5411 unsigned numArgs = isStore ? 2 : 1;
5412
5413 // Ensure that we have the proper number of arguments.
5414 if (checkArgCount(*this, TheCall, numArgs))
5415 return ExprError();
5416
5417 // Inspect the last argument of the nontemporal builtin. This should always
5418 // be a pointer type, from which we imply the type of the memory access.
5419 // Because it is a pointer type, we don't have to worry about any implicit
5420 // casts here.
5421 Expr *PointerArg = TheCall->getArg(numArgs - 1);
5422 ExprResult PointerArgResult =
5423 DefaultFunctionArrayLvalueConversion(PointerArg);
5424
5425 if (PointerArgResult.isInvalid())
5426 return ExprError();
5427 PointerArg = PointerArgResult.get();
5428 TheCall->setArg(numArgs - 1, PointerArg);
5429
5430 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
5431 if (!pointerType) {
5432 Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer)
5433 << PointerArg->getType() << PointerArg->getSourceRange();
5434 return ExprError();
5435 }
5436
5437 QualType ValType = pointerType->getPointeeType();
5438
5439 // Strip any qualifiers off ValType.
5440 ValType = ValType.getUnqualifiedType();
5441 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
5442 !ValType->isBlockPointerType() && !ValType->isFloatingType() &&
5443 !ValType->isVectorType()) {
5444 Diag(DRE->getBeginLoc(),
5445 diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector)
5446 << PointerArg->getType() << PointerArg->getSourceRange();
5447 return ExprError();
5448 }
5449
5450 if (!isStore) {
5451 TheCall->setType(ValType);
5452 return TheCallResult;
5453 }
5454
5455 ExprResult ValArg = TheCall->getArg(0);
5456 InitializedEntity Entity = InitializedEntity::InitializeParameter(
5457 Context, ValType, /*consume*/ false);
5458 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
5459 if (ValArg.isInvalid())
5460 return ExprError();
5461
5462 TheCall->setArg(0, ValArg.get());
5463 TheCall->setType(Context.VoidTy);
5464 return TheCallResult;
5465}
5466
5467/// CheckObjCString - Checks that the argument to the builtin
5468/// CFString constructor is correct
5469/// Note: It might also make sense to do the UTF-16 conversion here (would
5470/// simplify the backend).
5471bool Sema::CheckObjCString(Expr *Arg) {
5472 Arg = Arg->IgnoreParenCasts();
5473 StringLiteral *Literal = dyn_cast<StringLiteral>(Arg);
5474
5475 if (!Literal || !Literal->isAscii()) {
5476 Diag(Arg->getBeginLoc(), diag::err_cfstring_literal_not_string_constant)
5477 << Arg->getSourceRange();
5478 return true;
5479 }
5480
5481 if (Literal->containsNonAsciiOrNull()) {
5482 StringRef String = Literal->getString();
5483 unsigned NumBytes = String.size();
5484 SmallVector<llvm::UTF16, 128> ToBuf(NumBytes);
5485 const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data();
5486 llvm::UTF16 *ToPtr = &ToBuf[0];
5487
5488 llvm::ConversionResult Result =
5489 llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr,
5490 ToPtr + NumBytes, llvm::strictConversion);
5491 // Check for conversion failure.
5492 if (Result != llvm::conversionOK)
5493 Diag(Arg->getBeginLoc(), diag::warn_cfstring_truncated)
5494 << Arg->getSourceRange();
5495 }
5496 return false;
5497}
5498
/// CheckOSLogFormatStringArg - Checks that the format string argument to the
/// os_log() and os_trace() functions is correct, and converts it to
/// const char *.  (The doc comment previously said "CheckObjCString" — a
/// copy-paste from the function above.)
ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) {
  Arg = Arg->IgnoreParenCasts();
  auto *Literal = dyn_cast<StringLiteral>(Arg);
  if (!Literal) {
    // Also accept an Objective-C string literal; use its underlying
    // C string literal.
    if (auto *ObjcLiteral = dyn_cast<ObjCStringLiteral>(Arg)) {
      Literal = ObjcLiteral->getString();
    }
  }

  // Only ordinary or UTF-8 string literals are valid os_log formats.
  if (!Literal || (!Literal->isAscii() && !Literal->isUTF8())) {
    return ExprError(
        Diag(Arg->getBeginLoc(), diag::err_os_log_format_not_string_constant)
        << Arg->getSourceRange());
  }

  // Convert the literal to 'const char *' via normal parameter initialization.
  ExprResult Result(Literal);
  QualType ResultTy = Context.getPointerType(Context.CharTy.withConst());
  InitializedEntity Entity =
      InitializedEntity::InitializeParameter(Context, ResultTy, false);
  Result = PerformCopyInitialization(Entity, SourceLocation(), Result);
  return Result;
}
5523
/// Check that the user is calling the appropriate va_start builtin for the
/// target and calling convention.  __builtin_ms_va_start may only be used in
/// Win64-ABI functions on x86-64/AArch64, and __builtin_va_start may not be
/// used in Win64-ABI functions on non-Windows targets (and vice versa).
/// Returns true (after emitting a diagnostic) on error.
static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) {
  const llvm::Triple &TT = S.Context.getTargetInfo().getTriple();
  bool IsX64 = TT.getArch() == llvm::Triple::x86_64;
  bool IsAArch64 = TT.getArch() == llvm::Triple::aarch64;
  bool IsWindows = TT.isOSWindows();
  bool IsMSVAStart = BuiltinID == Builtin::BI__builtin_ms_va_start;
  if (IsX64 || IsAArch64) {
    // Determine the calling convention of the enclosing function; default
    // to CC_C when there is no current function declaration.
    CallingConv CC = CC_C;
    if (const FunctionDecl *FD = S.getCurFunctionDecl())
      CC = FD->getType()->getAs<FunctionType>()->getCallConv();
    if (IsMSVAStart) {
      // Don't allow this in System V ABI functions.
      if (CC == CC_X86_64SysV || (!IsWindows && CC != CC_Win64))
        return S.Diag(Fn->getBeginLoc(),
                      diag::err_ms_va_start_used_in_sysv_function);
    } else {
      // On x86-64/AArch64 Unix, don't allow this in Win64 ABI functions.
      // On x64 Windows, don't allow this in System V ABI functions.
      // (Yes, that means there's no corresponding way to support variadic
      // System V ABI functions on Windows.)
      if ((IsWindows && CC == CC_X86_64SysV) ||
          (!IsWindows && CC == CC_Win64))
        return S.Diag(Fn->getBeginLoc(),
                      diag::err_va_start_used_in_wrong_abi_function)
               << !IsWindows;
    }
    return false;
  }

  // On every other architecture, __builtin_ms_va_start is simply unavailable.
  if (IsMSVAStart)
    return S.Diag(Fn->getBeginLoc(), diag::err_builtin_x64_aarch64_only);
  return false;
}
5559
/// Check that va_start is being used inside a variadic function, block, or
/// Objective-C method.  Emits a diagnostic and returns true on error.  On
/// success, if \p LastParam is non-null, it is set to the last named
/// parameter of the enclosing callable (or nullptr if it has none).
static bool checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn,
                                             ParmVarDecl **LastParam = nullptr) {
  // Determine whether the current function, block, or obj-c method is variadic
  // and get its parameter list.
  bool IsVariadic = false;
  ArrayRef<ParmVarDecl *> Params;
  DeclContext *Caller = S.CurContext;
  if (auto *Block = dyn_cast<BlockDecl>(Caller)) {
    IsVariadic = Block->isVariadic();
    Params = Block->parameters();
  } else if (auto *FD = dyn_cast<FunctionDecl>(Caller)) {
    IsVariadic = FD->isVariadic();
    Params = FD->parameters();
  } else if (auto *MD = dyn_cast<ObjCMethodDecl>(Caller)) {
    IsVariadic = MD->isVariadic();
    // FIXME: This isn't correct for methods (results in bogus warning).
    Params = MD->parameters();
  } else if (isa<CapturedDecl>(Caller)) {
    // We don't support va_start in a CapturedDecl.
    S.Diag(Fn->getBeginLoc(), diag::err_va_start_captured_stmt);
    return true;
  } else {
    // This must be some other declcontext that parses exprs.
    S.Diag(Fn->getBeginLoc(), diag::err_va_start_outside_function);
    return true;
  }

  // va_start is only meaningful in a variadic callable.
  if (!IsVariadic) {
    S.Diag(Fn->getBeginLoc(), diag::err_va_start_fixed_function);
    return true;
  }

  if (LastParam)
    *LastParam = Params.empty() ? nullptr : Params.back();

  return false;
}
5597
/// Check the arguments to '__builtin_va_start' or '__builtin_ms_va_start'
/// for validity.  Emit an error and return true on failure; return false
/// on success.
bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) {
  Expr *Fn = TheCall->getCallee();

  // Reject va_start variants that don't match the target/calling convention.
  if (checkVAStartABI(*this, BuiltinID, Fn))
    return true;

  // Exactly two arguments are expected: the va_list and the last named
  // parameter.
  if (TheCall->getNumArgs() > 2) {
    Diag(TheCall->getArg(2)->getBeginLoc(),
         diag::err_typecheck_call_too_many_args)
        << 0 /*function call*/ << 2 << TheCall->getNumArgs()
        << Fn->getSourceRange()
        << SourceRange(TheCall->getArg(2)->getBeginLoc(),
                       (*(TheCall->arg_end() - 1))->getEndLoc());
    return true;
  }

  if (TheCall->getNumArgs() < 2) {
    return Diag(TheCall->getEndLoc(),
                diag::err_typecheck_call_too_few_args_at_least)
           << 0 /*function call*/ << 2 << TheCall->getNumArgs();
  }

  // Type-check the first argument normally.
  if (checkBuiltinArgument(*this, TheCall, 0))
    return true;

  // Check that the current function is variadic, and get its last parameter.
  ParmVarDecl *LastParam;
  if (checkVAStartIsInVariadicFunction(*this, Fn, &LastParam))
    return true;

  // Verify that the second argument to the builtin is the last argument of the
  // current function or method.
  bool SecondArgIsLastNamedArgument = false;
  const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts();

  // These are valid if SecondArgIsLastNamedArgument is false after the next
  // block.
  QualType Type;
  SourceLocation ParamLoc;
  bool IsCRegister = false;

  if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) {
    if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) {
      SecondArgIsLastNamedArgument = PV == LastParam;

      Type = PV->getType();
      ParamLoc = PV->getLocation();
      // 'register' parameters in C make va_start undefined behavior.
      IsCRegister =
          PV->getStorageClass() == SC_Register && !getLangOpts().CPlusPlus;
    }
  }

  if (!SecondArgIsLastNamedArgument)
    Diag(TheCall->getArg(1)->getBeginLoc(),
         diag::warn_second_arg_of_va_start_not_last_named_param);
  else if (IsCRegister || Type->isReferenceType() ||
           Type->isSpecificBuiltinType(BuiltinType::Float) || [=] {
             // Promotable integers are UB, but enumerations need a bit of
             // extra checking to see what their promotable type actually is.
             if (!Type->isPromotableIntegerType())
               return false;
             if (!Type->isEnumeralType())
               return true;
             const EnumDecl *ED = Type->getAs<EnumType>()->getDecl();
             return !(ED &&
                      Context.typesAreCompatible(ED->getPromotionType(), Type));
           }()) {
    // The parameter type makes va_arg retrieval undefined; tell the user why.
    unsigned Reason = 0;
    if (Type->isReferenceType()) Reason = 1;
    else if (IsCRegister) Reason = 2;
    Diag(Arg->getBeginLoc(), diag::warn_va_start_type_is_undefined) << Reason;
    Diag(ParamLoc, diag::note_parameter_type) << Type;
  }

  TheCall->setType(Context.VoidTy);
  return false;
}
5679
/// Check a call to the Windows-on-ARM '__va_start' builtin.  Emits
/// diagnostics and returns true on error.
bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) {
  // void __va_start(va_list *ap, const char *named_addr, size_t slot_size,
  //                 const char *named_addr);
  // NOTE(review): the signature comment lists 'named_addr' twice; the code
  // below only validates args 1 (char*) and 2 (size_t) — confirm against the
  // MSVC __va_start contract before relying on this comment.

  Expr *Func = Call->getCallee();

  if (Call->getNumArgs() < 3)
    return Diag(Call->getEndLoc(),
                diag::err_typecheck_call_too_few_args_at_least)
           << 0 /*function call*/ << 3 << Call->getNumArgs();

  // Type-check the first argument normally.
  if (checkBuiltinArgument(*this, Call, 0))
    return true;

  // Check that the current function is variadic.
  if (checkVAStartIsInVariadicFunction(*this, Func))
    return true;

  // __va_start on Windows does not validate the parameter qualifiers

  const Expr *Arg1 = Call->getArg(1)->IgnoreParens();
  const Type *Arg1Ty = Arg1->getType().getCanonicalType().getTypePtr();

  const Expr *Arg2 = Call->getArg(2)->IgnoreParens();
  const Type *Arg2Ty = Arg2->getType().getCanonicalType().getTypePtr();

  // Argument 1 must be a (possibly const) char pointer.
  const QualType &ConstCharPtrTy =
      Context.getPointerType(Context.CharTy.withConst());
  if (!Arg1Ty->isPointerType() ||
      Arg1Ty->getPointeeType().withoutLocalFastQualifiers() != Context.CharTy)
    Diag(Arg1->getBeginLoc(), diag::err_typecheck_convert_incompatible)
        << Arg1->getType() << ConstCharPtrTy << 1 /* different class */
        << 0                                      /* qualifier difference */
        << 3                                      /* parameter mismatch */
        << 2 << Arg1->getType() << ConstCharPtrTy;

  // Argument 2 must have the target's size_t type.
  const QualType SizeTy = Context.getSizeType();
  if (Arg2Ty->getCanonicalTypeInternal().withoutLocalFastQualifiers() != SizeTy)
    Diag(Arg2->getBeginLoc(), diag::err_typecheck_convert_incompatible)
        << Arg2->getType() << SizeTy << 1 /* different class */
        << 0                              /* qualifier difference */
        << 3                              /* parameter mismatch */
        << 3 << Arg2->getType() << SizeTy;

  return false;
}
5727
/// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and
/// friends.  This is declared to take (...), so we have to check everything.
/// Returns true (with a diagnostic) on error.
bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) {
  // Exactly two arguments are required.
  if (TheCall->getNumArgs() < 2)
    return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args)
           << 0 << 2 << TheCall->getNumArgs() /*function call*/;
  if (TheCall->getNumArgs() > 2)
    return Diag(TheCall->getArg(2)->getBeginLoc(),
                diag::err_typecheck_call_too_many_args)
           << 0 /*function call*/ << 2 << TheCall->getNumArgs()
           << SourceRange(TheCall->getArg(2)->getBeginLoc(),
                          (*(TheCall->arg_end() - 1))->getEndLoc());

  ExprResult OrigArg0 = TheCall->getArg(0);
  ExprResult OrigArg1 = TheCall->getArg(1);

  // Do standard promotions between the two arguments, returning their common
  // type.
  QualType Res = UsualArithmeticConversions(OrigArg0, OrigArg1, false);
  if (OrigArg0.isInvalid() || OrigArg1.isInvalid())
    return true;

  // Make sure any conversions are pushed back into the call; this is
  // type safe since unordered compare builtins are declared as "_Bool
  // foo(...)".
  TheCall->setArg(0, OrigArg0.get());
  TheCall->setArg(1, OrigArg1.get());

  // Defer checking to instantiation time for dependent arguments.
  if (OrigArg0.get()->isTypeDependent() || OrigArg1.get()->isTypeDependent())
    return false;

  // If the common type isn't a real floating type, then the arguments were
  // invalid for this operation.
  if (Res.isNull() || !Res->isRealFloatingType())
    return Diag(OrigArg0.get()->getBeginLoc(),
                diag::err_typecheck_call_invalid_ordered_compare)
           << OrigArg0.get()->getType() << OrigArg1.get()->getType()
           << SourceRange(OrigArg0.get()->getBeginLoc(),
                          OrigArg1.get()->getEndLoc());

  return false;
}
5770
/// SemaBuiltinFPClassification - Handle functions like
/// __builtin_isnan and friends.  This is declared to take (...), so we have
/// to check everything.  We expect the last argument to be a floating point
/// value.  (The comment previously repeated "SemaBuiltinSemaBuiltin" — typo.)
bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
  // Exactly NumArgs arguments are required.
  if (TheCall->getNumArgs() < NumArgs)
    return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args)
           << 0 << NumArgs << TheCall->getNumArgs() /*function call*/;
  if (TheCall->getNumArgs() > NumArgs)
    return Diag(TheCall->getArg(NumArgs)->getBeginLoc(),
                diag::err_typecheck_call_too_many_args)
           << 0 /*function call*/ << NumArgs << TheCall->getNumArgs()
           << SourceRange(TheCall->getArg(NumArgs)->getBeginLoc(),
                          (*(TheCall->arg_end() - 1))->getEndLoc());

  Expr *OrigArg = TheCall->getArg(NumArgs-1);

  // Defer checking to instantiation time for dependent arguments.
  if (OrigArg->isTypeDependent())
    return false;

  // This operation requires a non-_Complex floating-point number.
  if (!OrigArg->getType()->isRealFloatingType())
    return Diag(OrigArg->getBeginLoc(),
                diag::err_typecheck_call_invalid_unary_fp)
           << OrigArg->getType() << OrigArg->getSourceRange();

  // If this is an implicit conversion from float -> float, double, or
  // long double, remove it.
  if (ImplicitCastExpr *Cast = dyn_cast<ImplicitCastExpr>(OrigArg)) {
    // Only remove standard FloatCasts, leaving other casts inplace
    if (Cast->getCastKind() == CK_FloatingCast) {
      Expr *CastArg = Cast->getSubExpr();
      if (CastArg->getType()->isSpecificBuiltinType(BuiltinType::Float)) {
        assert(
            (Cast->getType()->isSpecificBuiltinType(BuiltinType::Double) ||
             Cast->getType()->isSpecificBuiltinType(BuiltinType::Float) ||
             Cast->getType()->isSpecificBuiltinType(BuiltinType::LongDouble)) &&
            "promotion from float to either float, double, or long double is "
            "the only expected cast here");
        // Detach the sub-expression from the cast before re-installing it as
        // the call argument, so it is not owned in two places.
        Cast->setSubExpr(nullptr);
        TheCall->setArg(NumArgs-1, CastArg);
      }
    }
  }

  return false;
}
5818
5819// Customized Sema Checking for VSX builtins that have the following signature:
5820// vector [...] builtinName(vector [...], vector [...], const int);
5821// Which takes the same type of vectors (any legal vector type) for the first
5822// two arguments and takes compile time constant for the third argument.
5823// Example builtins are :
5824// vector double vec_xxpermdi(vector double, vector double, int);
5825// vector short vec_xxsldwi(vector short, vector short, int);
5826bool Sema::SemaBuiltinVSX(CallExpr *TheCall) {
5827 unsigned ExpectedNumArgs = 3;
5828 if (TheCall->getNumArgs() < ExpectedNumArgs)
5829 return Diag(TheCall->getEndLoc(),
5830 diag::err_typecheck_call_too_few_args_at_least)
5831 << 0 /*function call*/ << ExpectedNumArgs << TheCall->getNumArgs()
5832 << TheCall->getSourceRange();
5833
5834 if (TheCall->getNumArgs() > ExpectedNumArgs)
5835 return Diag(TheCall->getEndLoc(),
5836 diag::err_typecheck_call_too_many_args_at_most)
5837 << 0 /*function call*/ << ExpectedNumArgs << TheCall->getNumArgs()
5838 << TheCall->getSourceRange();
5839
5840 // Check the third argument is a compile time constant
5841 llvm::APSInt Value;
5842 if(!TheCall->getArg(2)->isIntegerConstantExpr(Value, Context))
5843 return Diag(TheCall->getBeginLoc(),
5844 diag::err_vsx_builtin_nonconstant_argument)
5845 << 3 /* argument index */ << TheCall->getDirectCallee()
5846 << SourceRange(TheCall->getArg(2)->getBeginLoc(),
5847 TheCall->getArg(2)->getEndLoc());
5848
5849 QualType Arg1Ty = TheCall->getArg(0)->getType();
5850 QualType Arg2Ty = TheCall->getArg(1)->getType();
5851
5852 // Check the type of argument 1 and argument 2 are vectors.
5853 SourceLocation BuiltinLoc = TheCall->getBeginLoc();
5854 if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) ||
5855 (!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) {
5856 return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector)
5857 << TheCall->getDirectCallee()
5858 << SourceRange(TheCall->getArg(0)->getBeginLoc(),
5859 TheCall->getArg(1)->getEndLoc());
5860 }
5861
5862 // Check the first two arguments are the same type.
5863 if (!Context.hasSameUnqualifiedType(Arg1Ty, Arg2Ty)) {
5864 return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector)
5865 << TheCall->getDirectCallee()
5866 << SourceRange(TheCall->getArg(0)->getBeginLoc(),
5867 TheCall->getArg(1)->getEndLoc());
5868 }
5869
5870 // When default clang type checking is turned off and the customized type
5871 // checking is used, the returning type of the function must be explicitly
5872 // set. Otherwise it is _Bool by default.
5873 TheCall->setType(Arg1Ty);
5874
5875 return false;
5876}
5877
/// SemaBuiltinShuffleVector - Handle __builtin_shufflevector.
// This is declared to take (...), so we have to check everything.
// On success the CallExpr is replaced by a ShuffleVectorExpr that takes
// ownership of the argument expressions.
ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) {
  // At minimum we need a source vector plus either a mask vector or indices.
  if (TheCall->getNumArgs() < 2)
    return ExprError(Diag(TheCall->getEndLoc(),
                          diag::err_typecheck_call_too_few_args_at_least)
                     << 0 /*function call*/ << 2 << TheCall->getNumArgs()
                     << TheCall->getSourceRange());

  // Determine which of the following types of shufflevector we're checking:
  // 1) unary, vector mask: (lhs, mask)
  // 2) binary, scalar mask: (lhs, rhs, index, ..., index)
  QualType resType = TheCall->getArg(0)->getType();
  unsigned numElements = 0;

  // Type checks are only possible when neither operand is type-dependent;
  // otherwise they are deferred to template instantiation.
  if (!TheCall->getArg(0)->isTypeDependent() &&
      !TheCall->getArg(1)->isTypeDependent()) {
    QualType LHSType = TheCall->getArg(0)->getType();
    QualType RHSType = TheCall->getArg(1)->getType();

    if (!LHSType->isVectorType() || !RHSType->isVectorType())
      return ExprError(
          Diag(TheCall->getBeginLoc(), diag::err_vec_builtin_non_vector)
          << TheCall->getDirectCallee()
          << SourceRange(TheCall->getArg(0)->getBeginLoc(),
                         TheCall->getArg(1)->getEndLoc()));

    numElements = LHSType->getAs<VectorType>()->getNumElements();
    unsigned numResElements = TheCall->getNumArgs() - 2;

    // Check to see if we have a call with 2 vector arguments, the unary shuffle
    // with mask. If so, verify that RHS is an integer vector type with the
    // same number of elts as lhs.
    if (TheCall->getNumArgs() == 2) {
      if (!RHSType->hasIntegerRepresentation() ||
          RHSType->getAs<VectorType>()->getNumElements() != numElements)
        return ExprError(Diag(TheCall->getBeginLoc(),
                              diag::err_vec_builtin_incompatible_vector)
                         << TheCall->getDirectCallee()
                         << SourceRange(TheCall->getArg(1)->getBeginLoc(),
                                        TheCall->getArg(1)->getEndLoc()));
    } else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) {
      // Binary form: both input vectors must have the same element type.
      return ExprError(Diag(TheCall->getBeginLoc(),
                            diag::err_vec_builtin_incompatible_vector)
                       << TheCall->getDirectCallee()
                       << SourceRange(TheCall->getArg(0)->getBeginLoc(),
                                      TheCall->getArg(1)->getEndLoc()));
    } else if (numElements != numResElements) {
      // The result vector width follows the number of indices given, not the
      // width of the inputs, so build a new generic vector type for it.
      QualType eltType = LHSType->getAs<VectorType>()->getElementType();
      resType = Context.getVectorType(eltType, numResElements,
                                      VectorType::GenericVector);
    }
  }

  // Validate every scalar index we can evaluate now (form 2 above).
  for (unsigned i = 2; i < TheCall->getNumArgs(); i++) {
    if (TheCall->getArg(i)->isTypeDependent() ||
        TheCall->getArg(i)->isValueDependent())
      continue;

    llvm::APSInt Result(32);
    if (!TheCall->getArg(i)->isIntegerConstantExpr(Result, Context))
      return ExprError(Diag(TheCall->getBeginLoc(),
                            diag::err_shufflevector_nonconstant_argument)
                       << TheCall->getArg(i)->getSourceRange());

    // Allow -1 which will be translated to undef in the IR.
    if (Result.isSigned() && Result.isAllOnesValue())
      continue;

    // Indices select from the concatenation of both inputs, hence the *2.
    if (Result.getActiveBits() > 64 || Result.getZExtValue() >= numElements*2)
      return ExprError(Diag(TheCall->getBeginLoc(),
                            diag::err_shufflevector_argument_too_large)
                       << TheCall->getArg(i)->getSourceRange());
  }

  // Move the arguments out of the CallExpr (nulling its slots) so they can be
  // re-parented under the ShuffleVectorExpr we return.
  SmallVector<Expr*, 32> exprs;

  for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) {
    exprs.push_back(TheCall->getArg(i));
    TheCall->setArg(i, nullptr);
  }

  return new (Context) ShuffleVectorExpr(Context, exprs, resType,
                                         TheCall->getCallee()->getBeginLoc(),
                                         TheCall->getRParenLoc());
}
5964
5965/// SemaConvertVectorExpr - Handle __builtin_convertvector
5966ExprResult Sema::SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
5967 SourceLocation BuiltinLoc,
5968 SourceLocation RParenLoc) {
5969 ExprValueKind VK = VK_RValue;
5970 ExprObjectKind OK = OK_Ordinary;
5971 QualType DstTy = TInfo->getType();
5972 QualType SrcTy = E->getType();
5973
5974 if (!SrcTy->isVectorType() && !SrcTy->isDependentType())
5975 return ExprError(Diag(BuiltinLoc,
5976 diag::err_convertvector_non_vector)
5977 << E->getSourceRange());
5978 if (!DstTy->isVectorType() && !DstTy->isDependentType())
5979 return ExprError(Diag(BuiltinLoc,
5980 diag::err_convertvector_non_vector_type));
5981
5982 if (!SrcTy->isDependentType() && !DstTy->isDependentType()) {
5983 unsigned SrcElts = SrcTy->getAs<VectorType>()->getNumElements();
5984 unsigned DstElts = DstTy->getAs<VectorType>()->getNumElements();
5985 if (SrcElts != DstElts)
5986 return ExprError(Diag(BuiltinLoc,
5987 diag::err_convertvector_incompatible_vector)
5988 << E->getSourceRange());
5989 }
5990
5991 return new (Context)
5992 ConvertVectorExpr(E, TInfo, DstTy, VK, OK, BuiltinLoc, RParenLoc);
5993}
5994
5995/// SemaBuiltinPrefetch - Handle __builtin_prefetch.
5996// This is declared to take (const void*, ...) and can take two
5997// optional constant int args.
5998bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) {
5999 unsigned NumArgs = TheCall->getNumArgs();
6000
6001 if (NumArgs > 3)
6002 return Diag(TheCall->getEndLoc(),
6003 diag::err_typecheck_call_too_many_args_at_most)
6004 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange();
6005
6006 // Argument 0 is checked for us and the remaining arguments must be
6007 // constant integers.
6008 for (unsigned i = 1; i != NumArgs; ++i)
6009 if (SemaBuiltinConstantArgRange(TheCall, i, 0, i == 1 ? 1 : 3))
6010 return true;
6011
6012 return false;
6013}
6014
6015/// SemaBuiltinAssume - Handle __assume (MS Extension).
6016// __assume does not evaluate its arguments, and should warn if its argument
6017// has side effects.
6018bool Sema::SemaBuiltinAssume(CallExpr *TheCall) {
6019 Expr *Arg = TheCall->getArg(0);
6020 if (Arg->isInstantiationDependent()) return false;
6021
6022 if (Arg->HasSideEffects(Context))
6023 Diag(Arg->getBeginLoc(), diag::warn_assume_side_effects)
6024 << Arg->getSourceRange()
6025 << cast<FunctionDecl>(TheCall->getCalleeDecl())->getIdentifier();
6026
6027 return false;
6028}
6029
6030/// Handle __builtin_alloca_with_align. This is declared
6031/// as (size_t, size_t) where the second size_t must be a power of 2 greater
6032/// than 8.
6033bool Sema::SemaBuiltinAllocaWithAlign(CallExpr *TheCall) {
6034 // The alignment must be a constant integer.
6035 Expr *Arg = TheCall->getArg(1);
6036
6037 // We can't check the value of a dependent argument.
6038 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) {
6039 if (const auto *UE =
6040 dyn_cast<UnaryExprOrTypeTraitExpr>(Arg->IgnoreParenImpCasts()))
6041 if (UE->getKind() == UETT_AlignOf ||
6042 UE->getKind() == UETT_PreferredAlignOf)
6043 Diag(TheCall->getBeginLoc(), diag::warn_alloca_align_alignof)
6044 << Arg->getSourceRange();
6045
6046 llvm::APSInt Result = Arg->EvaluateKnownConstInt(Context);
6047
6048 if (!Result.isPowerOf2())
6049 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two)
6050 << Arg->getSourceRange();
6051
6052 if (Result < Context.getCharWidth())
6053 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_small)
6054 << (unsigned)Context.getCharWidth() << Arg->getSourceRange();
6055
6056 if (Result > std::numeric_limits<int32_t>::max())
6057 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_big)
6058 << std::numeric_limits<int32_t>::max() << Arg->getSourceRange();
6059 }
6060
6061 return false;
6062}
6063
6064/// Handle __builtin_assume_aligned. This is declared
6065/// as (const void*, size_t, ...) and can take one optional constant int arg.
6066bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) {
6067 unsigned NumArgs = TheCall->getNumArgs();
6068
6069 if (NumArgs > 3)
6070 return Diag(TheCall->getEndLoc(),
6071 diag::err_typecheck_call_too_many_args_at_most)
6072 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange();
6073
6074 // The alignment must be a constant integer.
6075 Expr *Arg = TheCall->getArg(1);
6076
6077 // We can't check the value of a dependent argument.
6078 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) {
6079 llvm::APSInt Result;
6080 if (SemaBuiltinConstantArg(TheCall, 1, Result))
6081 return true;
6082
6083 if (!Result.isPowerOf2())
6084 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two)
6085 << Arg->getSourceRange();
6086 }
6087
6088 if (NumArgs > 2) {
6089 ExprResult Arg(TheCall->getArg(2));
6090 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
6091 Context.getSizeType(), false);
6092 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
6093 if (Arg.isInvalid()) return true;
6094 TheCall->setArg(2, Arg.get());
6095 }
6096
6097 return false;
6098}
6099
/// Check calls to __builtin_os_log_format and
/// __builtin_os_log_format_buffer_size: validate arity, coerce the buffer and
/// variadic arguments, check the format string, and set the result type.
bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) {
  unsigned BuiltinID =
      cast<FunctionDecl>(TheCall->getCalleeDecl())->getBuiltinID();
  // The size variant takes only the format string; the format variant also
  // takes a destination buffer as its first argument.
  bool IsSizeCall = BuiltinID == Builtin::BI__builtin_os_log_format_buffer_size;

  unsigned NumArgs = TheCall->getNumArgs();
  unsigned NumRequiredArgs = IsSizeCall ? 1 : 2;
  if (NumArgs < NumRequiredArgs) {
    return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args)
           << 0 /* function call */ << NumRequiredArgs << NumArgs
           << TheCall->getSourceRange();
  }
  // At most 0xff data arguments beyond the required ones are accepted.
  if (NumArgs >= NumRequiredArgs + 0x100) {
    return Diag(TheCall->getEndLoc(),
                diag::err_typecheck_call_too_many_args_at_most)
           << 0 /* function call */ << (NumRequiredArgs + 0xff) << NumArgs
           << TheCall->getSourceRange();
  }
  // Running argument index; advanced as each positional argument is handled.
  unsigned i = 0;

  // For formatting call, check buffer arg.
  if (!IsSizeCall) {
    ExprResult Arg(TheCall->getArg(i));
    InitializedEntity Entity = InitializedEntity::InitializeParameter(
        Context, Context.VoidPtrTy, false);
    Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid())
      return true;
    TheCall->setArg(i, Arg.get());
    i++;
  }

  // Check string literal arg.
  unsigned FormatIdx = i;
  {
    ExprResult Arg = CheckOSLogFormatStringArg(TheCall->getArg(i));
    if (Arg.isInvalid())
      return true;
    TheCall->setArg(i, Arg.get());
    i++;
  }

  // Make sure variadic args are scalar.
  unsigned FirstDataArg = i;
  while (i < NumArgs) {
    ExprResult Arg = DefaultVariadicArgumentPromotion(
        TheCall->getArg(i), VariadicFunction, nullptr);
    if (Arg.isInvalid())
      return true;
    // Each argument's size must fit in the one-byte size field of the
    // serialized log buffer entry.
    CharUnits ArgSize = Context.getTypeSizeInChars(Arg.get()->getType());
    if (ArgSize.getQuantity() >= 0x100) {
      return Diag(Arg.get()->getEndLoc(), diag::err_os_log_argument_too_big)
             << i << (int)ArgSize.getQuantity() << 0xff
             << TheCall->getSourceRange();
    }
    TheCall->setArg(i, Arg.get());
    i++;
  }

  // Check formatting specifiers. NOTE: We're only doing this for the non-size
  // call to avoid duplicate diagnostics.
  if (!IsSizeCall) {
    llvm::SmallBitVector CheckedVarArgs(NumArgs, false);
    ArrayRef<const Expr *> Args(TheCall->getArgs(), TheCall->getNumArgs());
    bool Success = CheckFormatArguments(
        Args, /*HasVAListArg*/ false, FormatIdx, FirstDataArg, FST_OSLog,
        VariadicFunction, TheCall->getBeginLoc(), SourceRange(),
        CheckedVarArgs);
    if (!Success)
      return true;
  }

  // The size variant returns the required buffer size; the format variant
  // returns a pointer into the buffer.
  if (IsSizeCall) {
    TheCall->setType(Context.getSizeType());
  } else {
    TheCall->setType(Context.VoidPtrTy);
  }
  return false;
}
6179
6180/// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr
6181/// TheCall is a constant expression.
6182bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
6183 llvm::APSInt &Result) {
6184 Expr *Arg = TheCall->getArg(ArgNum);
6185 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
6186 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
6187
6188 if (Arg->isTypeDependent() || Arg->isValueDependent()) return false;
6189
6190 if (!Arg->isIntegerConstantExpr(Result, Context))
6191 return Diag(TheCall->getBeginLoc(), diag::err_constant_integer_arg_type)
6192 << FDecl->getDeclName() << Arg->getSourceRange();
6193
6194 return false;
6195}
6196
6197/// SemaBuiltinConstantArgRange - Handle a check if argument ArgNum of CallExpr
6198/// TheCall is a constant expression in the range [Low, High].
6199bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
6200 int Low, int High, bool RangeIsError) {
6201 llvm::APSInt Result;
6202
6203 // We can't check the value of a dependent argument.
6204 Expr *Arg = TheCall->getArg(ArgNum);
6205 if (Arg->isTypeDependent() || Arg->isValueDependent())
6206 return false;
6207
6208 // Check constant-ness first.
6209 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
6210 return true;
6211
6212 if (Result.getSExtValue() < Low || Result.getSExtValue() > High) {
6213 if (RangeIsError)
6214 return Diag(TheCall->getBeginLoc(), diag::err_argument_invalid_range)
6215 << Result.toString(10) << Low << High << Arg->getSourceRange();
6216 else
6217 // Defer the warning until we know if the code will be emitted so that
6218 // dead code can ignore this.
6219 DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
6220 PDiag(diag::warn_argument_invalid_range)
6221 << Result.toString(10) << Low << High
6222 << Arg->getSourceRange());
6223 }
6224
6225 return false;
6226}
6227
6228/// SemaBuiltinConstantArgMultiple - Handle a check if argument ArgNum of CallExpr
6229/// TheCall is a constant expression is a multiple of Num..
6230bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
6231 unsigned Num) {
6232 llvm::APSInt Result;
6233
6234 // We can't check the value of a dependent argument.
6235 Expr *Arg = TheCall->getArg(ArgNum);
6236 if (Arg->isTypeDependent() || Arg->isValueDependent())
6237 return false;
6238
6239 // Check constant-ness first.
6240 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
6241 return true;
6242
6243 if (Result.getSExtValue() % Num != 0)
6244 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_multiple)
6245 << Num << Arg->getSourceRange();
6246
6247 return false;
6248}
6249
/// SemaBuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions
/// (AArch64 MTE builtins). Validates argument counts/types, applies the usual
/// lvalue conversions, and derives each builtin's result type.
bool Sema::SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) {
  // __builtin_arm_irg(ptr, mask): pointer + integer; returns pointer type.
  if (BuiltinID == AArch64::BI__builtin_arm_irg) {
    if (checkArgCount(*this, TheCall, 2))
      return true;
    Expr *Arg0 = TheCall->getArg(0);
    Expr *Arg1 = TheCall->getArg(1);

    ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
    if (FirstArg.isInvalid())
      return true;
    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
               << "first" << FirstArgType << Arg0->getSourceRange();
    TheCall->setArg(0, FirstArg.get());

    ExprResult SecArg = DefaultLvalueConversion(Arg1);
    if (SecArg.isInvalid())
      return true;
    QualType SecArgType = SecArg.get()->getType();
    if (!SecArgType->isIntegerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
               << "second" << SecArgType << Arg1->getSourceRange();

    // Derive the return type from the pointer argument.
    TheCall->setType(FirstArgType);
    return false;
  }

  // __builtin_arm_addg(ptr, imm): pointer + constant in [0,15]; returns
  // pointer type.
  if (BuiltinID == AArch64::BI__builtin_arm_addg) {
    if (checkArgCount(*this, TheCall, 2))
      return true;

    Expr *Arg0 = TheCall->getArg(0);
    ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
    if (FirstArg.isInvalid())
      return true;
    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
               << "first" << FirstArgType << Arg0->getSourceRange();
    TheCall->setArg(0, FirstArg.get());

    // Derive the return type from the pointer argument.
    TheCall->setType(FirstArgType);

    // Second arg must be an constant in range [0,15]
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
  }

  // __builtin_arm_gmi(ptr, mask): pointer + integer; returns int.
  if (BuiltinID == AArch64::BI__builtin_arm_gmi) {
    if (checkArgCount(*this, TheCall, 2))
      return true;
    Expr *Arg0 = TheCall->getArg(0);
    Expr *Arg1 = TheCall->getArg(1);

    ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
    if (FirstArg.isInvalid())
      return true;
    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
               << "first" << FirstArgType << Arg0->getSourceRange();

    QualType SecArgType = Arg1->getType();
    if (!SecArgType->isIntegerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
               << "second" << SecArgType << Arg1->getSourceRange();
    TheCall->setType(Context.IntTy);
    return false;
  }

  // __builtin_arm_ldg / __builtin_arm_stg(ptr): single pointer argument;
  // ldg additionally returns the (re-tagged) pointer type.
  if (BuiltinID == AArch64::BI__builtin_arm_ldg ||
      BuiltinID == AArch64::BI__builtin_arm_stg) {
    if (checkArgCount(*this, TheCall, 1))
      return true;
    Expr *Arg0 = TheCall->getArg(0);
    ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
    if (FirstArg.isInvalid())
      return true;

    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
               << "first" << FirstArgType << Arg0->getSourceRange();
    TheCall->setArg(0, FirstArg.get());

    // Derive the return type from the pointer argument.
    if (BuiltinID == AArch64::BI__builtin_arm_ldg)
      TheCall->setType(FirstArgType);
    return false;
  }

  // __builtin_arm_subp(a, b): two pointers (either may be a null constant,
  // but not both); returns long long.
  if (BuiltinID == AArch64::BI__builtin_arm_subp) {
    Expr *ArgA = TheCall->getArg(0);
    Expr *ArgB = TheCall->getArg(1);

    ExprResult ArgExprA = DefaultFunctionArrayLvalueConversion(ArgA);
    ExprResult ArgExprB = DefaultFunctionArrayLvalueConversion(ArgB);

    if (ArgExprA.isInvalid() || ArgExprB.isInvalid())
      return true;

    QualType ArgTypeA = ArgExprA.get()->getType();
    QualType ArgTypeB = ArgExprB.get()->getType();

    auto isNull = [&] (Expr *E) -> bool {
      return E->isNullPointerConstant(
                        Context, Expr::NPC_ValueDependentIsNotNull); };

    // argument should be either a pointer or null
    if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA))
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer)
        << "first" << ArgTypeA << ArgA->getSourceRange();

    if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB))
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer)
        << "second" << ArgTypeB << ArgB->getSourceRange();

    // Ensure Pointee types are compatible
    if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) &&
        ArgTypeB->isAnyPointerType() && !isNull(ArgB)) {
      QualType pointeeA = ArgTypeA->getPointeeType();
      QualType pointeeB = ArgTypeB->getPointeeType();
      if (!Context.typesAreCompatible(
             Context.getCanonicalType(pointeeA).getUnqualifiedType(),
             Context.getCanonicalType(pointeeB).getUnqualifiedType())) {
        return Diag(TheCall->getBeginLoc(), diag::err_typecheck_sub_ptr_compatible)
          << ArgTypeA <<  ArgTypeB << ArgA->getSourceRange()
          << ArgB->getSourceRange();
      }
    }

    // at least one argument should be pointer type
    if (!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_any2arg_pointer)
        <<  ArgTypeA << ArgTypeB << ArgA->getSourceRange();

    if (isNull(ArgA)) // adopt type of the other pointer
      ArgExprA = ImpCastExprToType(ArgExprA.get(), ArgTypeB, CK_NullToPointer);

    if (isNull(ArgB))
      ArgExprB = ImpCastExprToType(ArgExprB.get(), ArgTypeA, CK_NullToPointer);

    TheCall->setArg(0, ArgExprA.get());
    TheCall->setArg(1, ArgExprB.get());
    TheCall->setType(Context.LongLongTy);
    return false;
  }
  // Callers only dispatch known MTE builtin IDs here.
  assert(false && "Unhandled ARM MTE intrinsic");
  return true;
}
6403
/// SemaBuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr
/// TheCall is an ARM/AArch64 special register string literal.
/// \p ExpectedFieldNum is the number of colon-separated fields the register
/// spec should have; \p AllowName additionally permits a plain register name.
bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
                                    int ArgNum, unsigned ExpectedFieldNum,
                                    bool AllowName) {
  bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
                      BuiltinID == ARM::BI__builtin_arm_wsr64 ||
                      BuiltinID == ARM::BI__builtin_arm_rsr ||
                      BuiltinID == ARM::BI__builtin_arm_rsrp ||
                      BuiltinID == ARM::BI__builtin_arm_wsr ||
                      BuiltinID == ARM::BI__builtin_arm_wsrp;
  bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
                          BuiltinID == AArch64::BI__builtin_arm_rsr ||
                          BuiltinID == AArch64::BI__builtin_arm_rsrp ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr ||
                          BuiltinID == AArch64::BI__builtin_arm_wsrp;
  assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin.");

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check if the argument is a string literal.
  if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
    return Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
           << Arg->getSourceRange();

  // Check the type of special register given.
  StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
  SmallVector<StringRef, 6> Fields;
  Reg.split(Fields, ":");

  // Either the expected multi-field form, or (when allowed) a bare name.
  if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1))
    return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
           << Arg->getSourceRange();

  // If the string is the name of a register then we cannot check that it is
  // valid here but if the string is of one the forms described in ACLE then we
  // can check that the supplied fields are integers and within the valid
  // ranges.
  if (Fields.size() > 1) {
    bool FiveFields = Fields.size() == 5;

    bool ValidString = true;
    if (IsARMBuiltin) {
      // ARM coprocessor form: strip the "cp"/"p" and "c" prefixes so the
      // remaining text can be range-checked as plain integers below.
      ValidString &= Fields[0].startswith_lower("cp") ||
                     Fields[0].startswith_lower("p");
      if (ValidString)
        Fields[0] =
            Fields[0].drop_front(Fields[0].startswith_lower("cp") ? 2 : 1);

      ValidString &= Fields[2].startswith_lower("c");
      if (ValidString)
        Fields[2] = Fields[2].drop_front(1);

      if (FiveFields) {
        ValidString &= Fields[3].startswith_lower("c");
        if (ValidString)
          Fields[3] = Fields[3].drop_front(1);
      }
    }

    // Per-field upper bounds (lower bound is always 0).
    SmallVector<int, 5> Ranges;
    if (FiveFields)
      Ranges.append({IsAArch64Builtin ? 1 : 15, 7, 15, 15, 7});
    else
      Ranges.append({15, 7, 15});

    for (unsigned i=0; i<Fields.size(); ++i) {
      int IntField;
      ValidString &= !Fields[i].getAsInteger(10, IntField);
      ValidString &= (IntField >= 0 && IntField <= Ranges[i]);
    }

    if (!ValidString)
      return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
             << Arg->getSourceRange();
  } else if (IsAArch64Builtin && Fields.size() == 1) {
    // If the register name is one of those that appear in the condition below
    // and the special register builtin being used is one of the write builtins,
    // then we require that the argument provided for writing to the register
    // is an integer constant expression. This is because it will be lowered to
    // an MSR (immediate) instruction, so we need to know the immediate at
    // compile time.
    if (TheCall->getNumArgs() != 2)
      return false;

    std::string RegLower = Reg.lower();
    if (RegLower != "spsel" && RegLower != "daifset" && RegLower != "daifclr" &&
        RegLower != "pan" && RegLower != "uao")
      return false;

    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
  }

  return false;
}
6503
6504/// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val).
6505/// This checks that the target supports __builtin_longjmp and
6506/// that val is a constant 1.
6507bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) {
6508 if (!Context.getTargetInfo().hasSjLjLowering())
6509 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_unsupported)
6510 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
6511
6512 Expr *Arg = TheCall->getArg(1);
6513 llvm::APSInt Result;
6514
6515 // TODO: This is less than ideal. Overload this to take a value.
6516 if (SemaBuiltinConstantArg(TheCall, 1, Result))
6517 return true;
6518
6519 if (Result != 1)
6520 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_invalid_val)
6521 << SourceRange(Arg->getBeginLoc(), Arg->getEndLoc());
6522
6523 return false;
6524}
6525
6526/// SemaBuiltinSetjmp - Handle __builtin_setjmp(void *env[5]).
6527/// This checks that the target supports __builtin_setjmp.
6528bool Sema::SemaBuiltinSetjmp(CallExpr *TheCall) {
6529 if (!Context.getTargetInfo().hasSjLjLowering())
6530 return Diag(TheCall->getBeginLoc(), diag::err_builtin_setjmp_unsupported)
6531 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
6532 return false;
6533}
6534
6535namespace {
6536
// Tracks, across all candidate format strings seen for a call, the highest
// argument index not consumed by any conversion specifier, along with the
// format-string expressions to attach to the eventual diagnostic.
class UncoveredArgHandler {
  // Sentinels for FirstUncoveredArg; non-negative values are real argument
  // indices.
  enum { Unknown = -1, AllCovered = -2 };

  signed FirstUncoveredArg = Unknown;
  // Every format-string expression whose highest uncovered index equals
  // FirstUncoveredArg.
  SmallVector<const Expr *, 4> DiagnosticExprs;

public:
  UncoveredArgHandler() = default;

  // True only when a genuine uncovered argument index has been recorded
  // (not Unknown, not AllCovered).
  bool hasUncoveredArg() const {
    return (FirstUncoveredArg >= 0);
  }

  unsigned getUncoveredArg() const {
    assert(hasUncoveredArg() && "no uncovered argument");
    return FirstUncoveredArg;
  }

  void setAllCovered() {
    // A string has been found with all arguments covered, so clear out
    // the diagnostics.
    DiagnosticExprs.clear();
    FirstUncoveredArg = AllCovered;
  }

  // Record that \p StrExpr leaves argument \p NewFirstUncoveredArg (and all
  // later ones) unused; keeps only the strings matching the highest index.
  void Update(signed NewFirstUncoveredArg, const Expr *StrExpr) {
    assert(NewFirstUncoveredArg >= 0 && "Outside range");

    // Don't update if a previous string covers all arguments.
    if (FirstUncoveredArg == AllCovered)
      return;

    // UncoveredArgHandler tracks the highest uncovered argument index
    // and with it all the strings that match this index.
    if (NewFirstUncoveredArg == FirstUncoveredArg)
      DiagnosticExprs.push_back(StrExpr);
    else if (NewFirstUncoveredArg > FirstUncoveredArg) {
      DiagnosticExprs.clear();
      DiagnosticExprs.push_back(StrExpr);
      FirstUncoveredArg = NewFirstUncoveredArg;
    }
  }

  // Emits the accumulated diagnostics; defined out of line (not visible in
  // this chunk).
  void Diagnose(Sema &S, bool IsFunctionCall, const Expr *ArgExpr);
};
6582
// Classification of a format-string argument after attempting to resolve it
// to a literal (used by the format-string checking below).
enum StringLiteralCheckType {
  SLCT_NotALiteral,      // Could not be resolved to a string literal.
  SLCT_UncheckedLiteral, // A literal, but its format was not checked.
  SLCT_CheckedLiteral    // A literal whose format was checked.
};
6588
6589} // namespace
6590
// Accumulate \p Addend into \p Offset (Offset += Addend for BO_Add, or
// Offset -= Addend for BO_Sub with the addend on the right), widening the
// APSInt bit width as needed so the running sum never overflows.
static void sumOffsets(llvm::APSInt &Offset, llvm::APSInt Addend,
                       BinaryOperatorKind BinOpKind,
                       bool AddendIsRight) {
  unsigned BitWidth = Offset.getBitWidth();
  unsigned AddendBitWidth = Addend.getBitWidth();
  // There might be negative interim results.
  if (Addend.isUnsigned()) {
    Addend = Addend.zext(++AddendBitWidth);
    Addend.setIsSigned(true);
  }
  // Adjust the bit width of the APSInts. Ext is a no-op if the width matches.
  if (AddendBitWidth > BitWidth) {
    Offset = Offset.sext(AddendBitWidth);
    BitWidth = AddendBitWidth;
  } else if (BitWidth > AddendBitWidth) {
    Addend = Addend.sext(BitWidth);
  }

  // Signed, overflow-detecting addition/subtraction.
  bool Ov = false;
  llvm::APSInt ResOffset = Offset;
  if (BinOpKind == BO_Add)
    ResOffset = Offset.sadd_ov(Addend, Ov);
  else {
    assert(AddendIsRight && BinOpKind == BO_Sub &&
           "operator must be add or sub with addend on the right");
    ResOffset = Offset.ssub_ov(Addend, Ov);
  }

  // We add an offset to a pointer here so we should support an offset as big as
  // possible.
  if (Ov) {
    assert(BitWidth <= std::numeric_limits<unsigned>::max() / 2 &&
           "index (intermediate) result too big");
    // On overflow, double the width and retry; the recursion terminates
    // because the widened operation cannot overflow again at this width.
    Offset = Offset.sext(2 * BitWidth);
    sumOffsets(Offset, Addend, BinOpKind, AddendIsRight);
    return;
  }

  Offset = ResOffset;
}
6631
6632namespace {
6633
// This is a wrapper class around StringLiteral to support offsetted string
// literals as format strings. It takes the offset into account when returning
// the string and its length or the source locations to display notes correctly.
class FormatStringLiteral {
  const StringLiteral *FExpr;
  // Number of leading characters of the underlying literal to skip.
  // NOTE(review): getString() drops Offset elements of the StringRef while
  // getByteLength() scales by the char byte width — presumably Offset counts
  // characters; confirm for wide/UTF literals.
  int64_t Offset;

 public:
  FormatStringLiteral(const StringLiteral *fexpr, int64_t Offset = 0)
      : FExpr(fexpr), Offset(Offset) {}

  // The literal's contents with the leading Offset portion removed.
  StringRef getString() const {
    return FExpr->getString().drop_front(Offset);
  }

  unsigned getByteLength() const {
    return FExpr->getByteLength() - getCharByteWidth() * Offset;
  }

  unsigned getLength() const { return FExpr->getLength() - Offset; }
  unsigned getCharByteWidth() const { return FExpr->getCharByteWidth(); }

  StringLiteral::StringKind getKind() const { return FExpr->getKind(); }

  QualType getType() const { return FExpr->getType(); }

  // Kind queries simply forward to the wrapped literal.
  bool isAscii() const { return FExpr->isAscii(); }
  bool isWide() const { return FExpr->isWide(); }
  bool isUTF8() const { return FExpr->isUTF8(); }
  bool isUTF16() const { return FExpr->isUTF16(); }
  bool isUTF32() const { return FExpr->isUTF32(); }
  bool isPascal() const { return FExpr->isPascal(); }

  // Maps a byte position within this (offsetted) view back to a source
  // location in the underlying literal.
  SourceLocation getLocationOfByte(
      unsigned ByteNo, const SourceManager &SM, const LangOptions &Features,
      const TargetInfo &Target, unsigned *StartToken = nullptr,
      unsigned *StartTokenByteOffset = nullptr) const {
    return FExpr->getLocationOfByte(ByteNo + Offset, SM, Features, Target,
                                    StartToken, StartTokenByteOffset);
  }

  SourceLocation getBeginLoc() const LLVM_READONLY {
    return FExpr->getBeginLoc().getLocWithOffset(Offset);
  }

  SourceLocation getEndLoc() const LLVM_READONLY { return FExpr->getEndLoc(); }
};
6681
6682} // namespace
6683
// Forward declaration: parses and checks one concrete format string literal
// against the call's data arguments (defined later in this file).
static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr,
                              const Expr *OrigFormatExpr,
                              ArrayRef<const Expr *> Args,
                              bool HasVAListArg, unsigned format_idx,
                              unsigned firstDataArg,
                              Sema::FormatStringType Type,
                              bool inFunctionCall,
                              Sema::VariadicCallType CallType,
                              llvm::SmallBitVector &CheckedVarArgs,
                              UncoveredArgHandler &UncoveredArg);
6694
6695// Determine if an expression is a string literal or constant string.
6696// If this function returns false on the arguments to a function expecting a
6697// format string, we will usually need to emit a warning.
6698// True string literals are then checked by CheckFormatString.
static StringLiteralCheckType
checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
                      bool HasVAListArg, unsigned format_idx,
                      unsigned firstDataArg, Sema::FormatStringType Type,
                      Sema::VariadicCallType CallType, bool InFunctionCall,
                      llvm::SmallBitVector &CheckedVarArgs,
                      UncoveredArgHandler &UncoveredArg,
                      llvm::APSInt Offset) {
 tryAgain:
  assert(Offset.isSigned() && "invalid offset");

  if (E->isTypeDependent() || E->isValueDependent())
    return SLCT_NotALiteral;

  E = E->IgnoreParenCasts();

  if (E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull))
    // Technically -Wformat-nonliteral does not warn about this case.
    // The behavior of printf and friends in this case is implementation
    // dependent.  Ideally if the format string cannot be null then
    // it should have a 'nonnull' attribute in the function prototype.
    return SLCT_UncheckedLiteral;

  switch (E->getStmtClass()) {
  case Stmt::BinaryConditionalOperatorClass:
  case Stmt::ConditionalOperatorClass: {
    // The expression is a literal if both sub-expressions were, and it was
    // completely checked only if both sub-expressions were checked.
    const AbstractConditionalOperator *C =
        cast<AbstractConditionalOperator>(E);

    // Determine whether it is necessary to check both sub-expressions, for
    // example, because the condition expression is a constant that can be
    // evaluated at compile time.
    bool CheckLeft = true, CheckRight = true;

    bool Cond;
    if (C->getCond()->EvaluateAsBooleanCondition(Cond, S.getASTContext())) {
      if (Cond)
        CheckRight = false;
      else
        CheckLeft = false;
    }

    // We need to maintain the offsets for the right and the left hand side
    // separately to check if every possible indexed expression is a valid
    // string literal. They might have different offsets for different string
    // literals in the end.
    StringLiteralCheckType Left;
    if (!CheckLeft)
      Left = SLCT_UncheckedLiteral;
    else {
      Left = checkFormatStringExpr(S, C->getTrueExpr(), Args,
                                   HasVAListArg, format_idx, firstDataArg,
                                   Type, CallType, InFunctionCall,
                                   CheckedVarArgs, UncoveredArg, Offset);
      if (Left == SLCT_NotALiteral || !CheckRight) {
        return Left;
      }
    }

    StringLiteralCheckType Right =
        checkFormatStringExpr(S, C->getFalseExpr(), Args,
                              HasVAListArg, format_idx, firstDataArg,
                              Type, CallType, InFunctionCall, CheckedVarArgs,
                              UncoveredArg, Offset);

    // Combine the results: the weaker classification wins (relies on the
    // enumerator ordering of StringLiteralCheckType).
    return (CheckLeft && Left < Right) ? Left : Right;
  }

  case Stmt::ImplicitCastExprClass:
    // Look through implicit casts (e.g. array-to-pointer decay).
    E = cast<ImplicitCastExpr>(E)->getSubExpr();
    goto tryAgain;

  case Stmt::OpaqueValueExprClass:
    if (const Expr *src = cast<OpaqueValueExpr>(E)->getSourceExpr()) {
      E = src;
      goto tryAgain;
    }
    return SLCT_NotALiteral;

  case Stmt::PredefinedExprClass:
    // While __func__, etc., are technically not string literals, they
    // cannot contain format specifiers and thus are not a security
    // liability.
    return SLCT_UncheckedLiteral;

  case Stmt::DeclRefExprClass: {
    const DeclRefExpr *DR = cast<DeclRefExpr>(E);

    // As an exception, do not flag errors for variables binding to
    // const string literals.
    if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
      bool isConstant = false;
      QualType T = DR->getType();

      if (const ArrayType *AT = S.Context.getAsArrayType(T)) {
        isConstant = AT->getElementType().isConstant(S.Context);
      } else if (const PointerType *PT = T->getAs<PointerType>()) {
        // Both the pointer and the pointee must be const for the pointer to
        // be guaranteed to still reference the initializing literal.
        isConstant = T.isConstant(S.Context) &&
                     PT->getPointeeType().isConstant(S.Context);
      } else if (T->isObjCObjectPointerType()) {
        // In ObjC, there is usually no "const ObjectPointer" type,
        // so don't check if the pointee type is constant.
        isConstant = T.isConstant(S.Context);
      }

      if (isConstant) {
        if (const Expr *Init = VD->getAnyInitializer()) {
          // Look through initializers like const char c[] = { "foo" }
          if (const InitListExpr *InitList = dyn_cast<InitListExpr>(Init)) {
            if (InitList->isStringLiteralInit())
              Init = InitList->getInit(0)->IgnoreParenImpCasts();
          }
          return checkFormatStringExpr(S, Init, Args,
                                       HasVAListArg, format_idx,
                                       firstDataArg, Type, CallType,
                                       /*InFunctionCall*/ false, CheckedVarArgs,
                                       UncoveredArg, Offset);
        }
      }

      // For vprintf* functions (i.e., HasVAListArg==true), we add a
      // special check to see if the format string is a function parameter
      // of the function calling the printf function.  If the function
      // has an attribute indicating it is a printf-like function, then we
      // should suppress warnings concerning non-literals being used in a call
      // to a vprintf function.  For example:
      //
      // void
      // logmessage(char const *fmt __attribute__ (format (printf, 1, 2)), ...){
      //      va_list ap;
      //      va_start(ap, fmt);
      //      vprintf(fmt, ap);  // Do NOT emit a warning about "fmt".
      //      ...
      // }
      if (HasVAListArg) {
        if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(VD)) {
          if (const NamedDecl *ND = dyn_cast<NamedDecl>(PV->getDeclContext())) {
            int PVIndex = PV->getFunctionScopeIndex() + 1;
            for (const auto *PVFormat : ND->specific_attrs<FormatAttr>()) {
              // adjust for implicit parameter
              if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND))
                if (MD->isInstance())
                  ++PVIndex;
              // We also check if the formats are compatible.
              // We can't pass a 'scanf' string to a 'printf' function.
              if (PVIndex == PVFormat->getFormatIdx() &&
                  Type == S.GetFormatStringType(PVFormat))
                return SLCT_UncheckedLiteral;
            }
          }
        }
      }
    }

    return SLCT_NotALiteral;
  }

  case Stmt::CallExprClass:
  case Stmt::CXXMemberCallExprClass: {
    const CallExpr *CE = cast<CallExpr>(E);
    if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl())) {
      // If the callee has format_arg attributes, the format string flows
      // through the call; check each designated argument instead.
      bool IsFirst = true;
      StringLiteralCheckType CommonResult;
      for (const auto *FA : ND->specific_attrs<FormatArgAttr>()) {
        const Expr *Arg = CE->getArg(FA->getFormatIdx().getASTIndex());
        StringLiteralCheckType Result = checkFormatStringExpr(
            S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type,
            CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset);
        if (IsFirst) {
          CommonResult = Result;
          IsFirst = false;
        }
      }
      if (!IsFirst)
        return CommonResult;

      // Constant-string builtins wrap their first argument unchanged.
      if (const auto *FD = dyn_cast<FunctionDecl>(ND)) {
        unsigned BuiltinID = FD->getBuiltinID();
        if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString ||
            BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString) {
          const Expr *Arg = CE->getArg(0);
          return checkFormatStringExpr(S, Arg, Args,
                                       HasVAListArg, format_idx,
                                       firstDataArg, Type, CallType,
                                       InFunctionCall, CheckedVarArgs,
                                       UncoveredArg, Offset);
        }
      }
    }

    return SLCT_NotALiteral;
  }
  case Stmt::ObjCMessageExprClass: {
    // Like the call case: a format_arg attribute on the method forwards the
    // format string through the message send.
    const auto *ME = cast<ObjCMessageExpr>(E);
    if (const auto *ND = ME->getMethodDecl()) {
      if (const auto *FA = ND->getAttr<FormatArgAttr>()) {
        const Expr *Arg = ME->getArg(FA->getFormatIdx().getASTIndex());
        return checkFormatStringExpr(
            S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type,
            CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset);
      }
    }

    return SLCT_NotALiteral;
  }
  case Stmt::ObjCStringLiteralClass:
  case Stmt::StringLiteralClass: {
    // Base case: an actual literal. Apply the accumulated offset and check
    // the remaining contents.
    const StringLiteral *StrE = nullptr;

    if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(E))
      StrE = ObjCFExpr->getString();
    else
      StrE = cast<StringLiteral>(E);

    if (StrE) {
      if (Offset.isNegative() || Offset > StrE->getLength()) {
        // TODO: It would be better to have an explicit warning for out of
        // bounds literals.
        return SLCT_NotALiteral;
      }
      FormatStringLiteral FStr(StrE, Offset.sextOrTrunc(64).getSExtValue());
      CheckFormatString(S, &FStr, E, Args, HasVAListArg, format_idx,
                        firstDataArg, Type, InFunctionCall, CallType,
                        CheckedVarArgs, UncoveredArg);
      return SLCT_CheckedLiteral;
    }

    return SLCT_NotALiteral;
  }
  case Stmt::BinaryOperatorClass: {
    const BinaryOperator *BinOp = cast<BinaryOperator>(E);

    // A string literal + an int offset is still a string literal.
    if (BinOp->isAdditiveOp()) {
      Expr::EvalResult LResult, RResult;

      bool LIsInt = BinOp->getLHS()->EvaluateAsInt(LResult, S.Context);
      bool RIsInt = BinOp->getRHS()->EvaluateAsInt(RResult, S.Context);

      // Exactly one side must be a constant integer; the other side is then
      // re-examined as the candidate literal.
      if (LIsInt != RIsInt) {
        BinaryOperatorKind BinOpKind = BinOp->getOpcode();

        if (LIsInt) {
          // "int + expr" is an offsetted literal; "int - expr" is not.
          if (BinOpKind == BO_Add) {
            sumOffsets(Offset, LResult.Val.getInt(), BinOpKind, RIsInt);
            E = BinOp->getRHS();
            goto tryAgain;
          }
        } else {
          sumOffsets(Offset, RResult.Val.getInt(), BinOpKind, RIsInt);
          E = BinOp->getLHS();
          goto tryAgain;
        }
      }
    }

    return SLCT_NotALiteral;
  }
  case Stmt::UnaryOperatorClass: {
    // Handle "&str[i]", which is equivalent to "str + i".
    const UnaryOperator *UnaOp = cast<UnaryOperator>(E);
    auto ASE = dyn_cast<ArraySubscriptExpr>(UnaOp->getSubExpr());
    if (UnaOp->getOpcode() == UO_AddrOf && ASE) {
      Expr::EvalResult IndexResult;
      if (ASE->getRHS()->EvaluateAsInt(IndexResult, S.Context)) {
        sumOffsets(Offset, IndexResult.Val.getInt(), BO_Add,
                   /*RHS is int*/ true);
        E = ASE->getBase();
        goto tryAgain;
      }
    }

    return SLCT_NotALiteral;
  }

  default:
    return SLCT_NotALiteral;
  }
}
6979
6980Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) {
6981 return llvm::StringSwitch<FormatStringType>(Format->getType()->getName())
6982 .Case("scanf", FST_Scanf)
6983 .Cases("printf", "printf0", FST_Printf)
6984 .Cases("NSString", "CFString", FST_NSString)
6985 .Case("strftime", FST_Strftime)
6986 .Case("strfmon", FST_Strfmon)
6987 .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FST_Kprintf)
6988 .Case("freebsd_kprintf", FST_FreeBSDKPrintf)
6989 .Case("os_trace", FST_OSLog)
6990 .Case("os_log", FST_OSLog)
6991 .Default(FST_Unknown);
6992}
6993
6994/// CheckFormatArguments - Check calls to printf and scanf (and similar
6995/// functions) for correct use of format strings.
6996/// Returns true if a format string has been fully checked.
6997bool Sema::CheckFormatArguments(const FormatAttr *Format,
6998 ArrayRef<const Expr *> Args,
6999 bool IsCXXMember,
7000 VariadicCallType CallType,
7001 SourceLocation Loc, SourceRange Range,
7002 llvm::SmallBitVector &CheckedVarArgs) {
7003 FormatStringInfo FSI;
7004 if (getFormatStringInfo(Format, IsCXXMember, &FSI))
7005 return CheckFormatArguments(Args, FSI.HasVAListArg, FSI.FormatIdx,
7006 FSI.FirstDataArg, GetFormatStringType(Format),
7007 CallType, Loc, Range, CheckedVarArgs);
7008 return false;
7009}
7010
bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args,
                                bool HasVAListArg, unsigned format_idx,
                                unsigned firstDataArg, FormatStringType Type,
                                VariadicCallType CallType,
                                SourceLocation Loc, SourceRange Range,
                                llvm::SmallBitVector &CheckedVarArgs) {
  // CHECK: printf/scanf-like function is called with no format string.
  if (format_idx >= Args.size()) {
    Diag(Loc, diag::warn_missing_format_string) << Range;
    return false;
  }

  const Expr *OrigFormatExpr = Args[format_idx]->IgnoreParenCasts();

  // CHECK: format string is not a string literal.
  //
  // Dynamically generated format strings are difficult to
  // automatically vet at compile time.  Requiring that format strings
  // are string literals: (1) permits the checking of format strings by
  // the compiler and thereby (2) can practically remove the source of
  // many format string exploits.

  // Format string can be either ObjC string (e.g. @"%d") or
  // C string (e.g. "%d")
  // ObjC string uses the same format specifiers as C string, so we can use
  // the same format string checking logic for both ObjC and C strings.
  UncoveredArgHandler UncoveredArg;
  StringLiteralCheckType CT =
      checkFormatStringExpr(*this, OrigFormatExpr, Args, HasVAListArg,
                            format_idx, firstDataArg, Type, CallType,
                            /*IsFunctionCall*/ true, CheckedVarArgs,
                            UncoveredArg,
                            /*no string offset*/ llvm::APSInt(64, false) = 0);

  // Generate a diagnostic where an uncovered argument is detected.
  if (UncoveredArg.hasUncoveredArg()) {
    unsigned ArgIdx = UncoveredArg.getUncoveredArg() + firstDataArg;
    assert(ArgIdx < Args.size() && "ArgIdx outside bounds");
    UncoveredArg.Diagnose(*this, /*IsFunctionCall*/true, Args[ArgIdx]);
  }

  if (CT != SLCT_NotALiteral)
    // Literal format string found, check done!
    return CT == SLCT_CheckedLiteral;

  // Strftime is particular as it always uses a single 'time' argument,
  // so it is safe to pass a non-literal string.
  if (Type == FST_Strftime)
    return false;

  // Do not emit diag when the string param is a macro expansion and the
  // format is either NSString or CFString. This is a hack to prevent
  // diag when using the NSLocalizedString and CFCopyLocalizedString macros
  // which are usually used in place of NS and CF string literals.
  SourceLocation FormatLoc = Args[format_idx]->getBeginLoc();
  if (Type == FST_NSString && SourceMgr.isInSystemMacro(FormatLoc))
    return false;

  // If there are no arguments specified, warn with -Wformat-security, otherwise
  // warn only with -Wformat-nonliteral.
  if (Args.size() == firstDataArg) {
    Diag(FormatLoc, diag::warn_format_nonliteral_noargs)
      << OrigFormatExpr->getSourceRange();
    // Suggest passing the non-literal string as the sole data argument of a
    // "%s"-style format, which is the conventional safe rewrite.
    switch (Type) {
    default:
      break;
    case FST_Kprintf:
    case FST_FreeBSDKPrintf:
    case FST_Printf:
      Diag(FormatLoc, diag::note_format_security_fixit)
        << FixItHint::CreateInsertion(FormatLoc, "\"%s\", ");
      break;
    case FST_NSString:
      Diag(FormatLoc, diag::note_format_security_fixit)
        << FixItHint::CreateInsertion(FormatLoc, "@\"%@\", ");
      break;
    }
  } else {
    Diag(FormatLoc, diag::warn_format_nonliteral)
      << OrigFormatExpr->getSourceRange();
  }
  return false;
}
7094
7095namespace {
7096
// Base handler for walking a parsed format string and diagnosing problems.
// Printf- and scanf-specific handlers derive from this (outside this view).
class CheckFormatHandler : public analyze_format_string::FormatStringHandler {
protected:
  Sema &S;
  const FormatStringLiteral *FExpr; // Format string (with offset applied).
  const Expr *OrigFormatExpr;       // Original format argument expression.
  const Sema::FormatStringType FSType; // printf/scanf/NSString/... family.
  const unsigned FirstDataArg; // Index in Args of the first data argument.
  const unsigned NumDataArgs;  // Number of data arguments after the format.
  const char *Beg; // Start of format string.
  const bool HasVAListArg; // True for v*printf-style (va_list) callees.
  ArrayRef<const Expr *> Args; // All arguments of the call.
  unsigned FormatIdx;          // Index in Args of the format string.
  llvm::SmallBitVector CoveredArgs; // Bit i set once data arg i is consumed.
  bool usesPositionalArgs = false;
  bool atFirstArg = true;
  bool inFunctionCall; // See EmitFormatDiagnostic's InFunctionCall parameter.
  Sema::VariadicCallType CallType;
  llvm::SmallBitVector &CheckedVarArgs; // Variadic args already checked.
  UncoveredArgHandler &UncoveredArg;    // Accumulates unused-argument info.

public:
  CheckFormatHandler(Sema &s, const FormatStringLiteral *fexpr,
                     const Expr *origFormatExpr,
                     const Sema::FormatStringType type, unsigned firstDataArg,
                     unsigned numDataArgs, const char *beg, bool hasVAListArg,
                     ArrayRef<const Expr *> Args, unsigned formatIdx,
                     bool inFunctionCall, Sema::VariadicCallType callType,
                     llvm::SmallBitVector &CheckedVarArgs,
                     UncoveredArgHandler &UncoveredArg)
      : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr), FSType(type),
        FirstDataArg(firstDataArg), NumDataArgs(numDataArgs), Beg(beg),
        HasVAListArg(hasVAListArg), Args(Args), FormatIdx(formatIdx),
        inFunctionCall(inFunctionCall), CallType(callType),
        CheckedVarArgs(CheckedVarArgs), UncoveredArg(UncoveredArg) {
    // Start with no data argument covered by any conversion specifier.
    CoveredArgs.resize(numDataArgs);
    CoveredArgs.reset();
  }

  // Called after the whole format string has been parsed; reports coverage.
  void DoneProcessing();

  void HandleIncompleteSpecifier(const char *startSpecifier,
                                 unsigned specifierLen) override;

  void HandleInvalidLengthModifier(
      const analyze_format_string::FormatSpecifier &FS,
      const analyze_format_string::ConversionSpecifier &CS,
      const char *startSpecifier, unsigned specifierLen,
      unsigned DiagID);

  void HandleNonStandardLengthModifier(
      const analyze_format_string::FormatSpecifier &FS,
      const char *startSpecifier, unsigned specifierLen);

  void HandleNonStandardConversionSpecifier(
      const analyze_format_string::ConversionSpecifier &CS,
      const char *startSpecifier, unsigned specifierLen);

  void HandlePosition(const char *startPos, unsigned posLen) override;

  void HandleInvalidPosition(const char *startSpecifier,
                             unsigned specifierLen,
                             analyze_format_string::PositionContext p) override;

  void HandleZeroPosition(const char *startPos, unsigned posLen) override;

  void HandleNullChar(const char *nullCharacter) override;

  // Static variant used when no handler instance is available (e.g. from
  // UncoveredArgHandler::Diagnose); documented at its definition.
  template <typename Range>
  static void
  EmitFormatDiagnostic(Sema &S, bool inFunctionCall, const Expr *ArgumentExpr,
                       const PartialDiagnostic &PDiag, SourceLocation StringLoc,
                       bool IsStringLocation, Range StringRange,
                       ArrayRef<FixItHint> Fixit = None);

protected:
  bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc,
                                        const char *startSpec,
                                        unsigned specifierLen,
                                        const char *csStart, unsigned csLen);

  void HandlePositionalNonpositionalArgs(SourceLocation Loc,
                                         const char *startSpec,
                                         unsigned specifierLen);

  SourceRange getFormatStringRange();
  CharSourceRange getSpecifierRange(const char *startSpecifier,
                                    unsigned specifierLen);
  // Translates a pointer into the format buffer to a source location.
  SourceLocation getLocationOfByte(const char *x);

  const Expr *getDataArg(unsigned i) const;

  // Returns false (and diagnoses) if argIndex has no matching data argument.
  bool CheckNumArgs(const analyze_format_string::FormatSpecifier &FS,
                    const analyze_format_string::ConversionSpecifier &CS,
                    const char *startSpecifier, unsigned specifierLen,
                    unsigned argIndex);

  template <typename Range>
  void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc,
                            bool IsStringLocation, Range StringRange,
                            ArrayRef<FixItHint> Fixit = None);
};
7198
7199} // namespace
7200
7201SourceRange CheckFormatHandler::getFormatStringRange() {
7202 return OrigFormatExpr->getSourceRange();
7203}
7204
7205CharSourceRange CheckFormatHandler::
7206getSpecifierRange(const char *startSpecifier, unsigned specifierLen) {
7207 SourceLocation Start = getLocationOfByte(startSpecifier);
7208 SourceLocation End = getLocationOfByte(startSpecifier + specifierLen - 1);
7209
7210 // Advance the end SourceLocation by one due to half-open ranges.
7211 End = End.getLocWithOffset(1);
7212
7213 return CharSourceRange::getCharRange(Start, End);
7214}
7215
7216SourceLocation CheckFormatHandler::getLocationOfByte(const char *x) {
7217 return FExpr->getLocationOfByte(x - Beg, S.getSourceManager(),
7218 S.getLangOpts(), S.Context.getTargetInfo());
7219}
7220
7221void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier,
7222 unsigned specifierLen){
7223 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_incomplete_specifier),
7224 getLocationOfByte(startSpecifier),
7225 /*IsStringLocation*/true,
7226 getSpecifierRange(startSpecifier, specifierLen));
7227}
7228
7229void CheckFormatHandler::HandleInvalidLengthModifier(
7230 const analyze_format_string::FormatSpecifier &FS,
7231 const analyze_format_string::ConversionSpecifier &CS,
7232 const char *startSpecifier, unsigned specifierLen, unsigned DiagID) {
7233 using namespace analyze_format_string;
7234
7235 const LengthModifier &LM = FS.getLengthModifier();
7236 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength());
7237
7238 // See if we know how to fix this length modifier.
7239 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier();
7240 if (FixedLM) {
7241 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(),
7242 getLocationOfByte(LM.getStart()),
7243 /*IsStringLocation*/true,
7244 getSpecifierRange(startSpecifier, specifierLen));
7245
7246 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier)
7247 << FixedLM->toString()
7248 << FixItHint::CreateReplacement(LMRange, FixedLM->toString());
7249
7250 } else {
7251 FixItHint Hint;
7252 if (DiagID == diag::warn_format_nonsensical_length)
7253 Hint = FixItHint::CreateRemoval(LMRange);
7254
7255 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(),
7256 getLocationOfByte(LM.getStart()),
7257 /*IsStringLocation*/true,
7258 getSpecifierRange(startSpecifier, specifierLen),
7259 Hint);
7260 }
7261}
7262
7263void CheckFormatHandler::HandleNonStandardLengthModifier(
7264 const analyze_format_string::FormatSpecifier &FS,
7265 const char *startSpecifier, unsigned specifierLen) {
7266 using namespace analyze_format_string;
7267
7268 const LengthModifier &LM = FS.getLengthModifier();
7269 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength());
7270
7271 // See if we know how to fix this length modifier.
7272 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier();
7273 if (FixedLM) {
7274 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
7275 << LM.toString() << 0,
7276 getLocationOfByte(LM.getStart()),
7277 /*IsStringLocation*/true,
7278 getSpecifierRange(startSpecifier, specifierLen));
7279
7280 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier)
7281 << FixedLM->toString()
7282 << FixItHint::CreateReplacement(LMRange, FixedLM->toString());
7283
7284 } else {
7285 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
7286 << LM.toString() << 0,
7287 getLocationOfByte(LM.getStart()),
7288 /*IsStringLocation*/true,
7289 getSpecifierRange(startSpecifier, specifierLen));
7290 }
7291}
7292
7293void CheckFormatHandler::HandleNonStandardConversionSpecifier(
7294 const analyze_format_string::ConversionSpecifier &CS,
7295 const char *startSpecifier, unsigned specifierLen) {
7296 using namespace analyze_format_string;
7297
7298 // See if we know how to fix this conversion specifier.
7299 Optional<ConversionSpecifier> FixedCS = CS.getStandardSpecifier();
7300 if (FixedCS) {
7301 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
7302 << CS.toString() << /*conversion specifier*/1,
7303 getLocationOfByte(CS.getStart()),
7304 /*IsStringLocation*/true,
7305 getSpecifierRange(startSpecifier, specifierLen));
7306
7307 CharSourceRange CSRange = getSpecifierRange(CS.getStart(), CS.getLength());
7308 S.Diag(getLocationOfByte(CS.getStart()), diag::note_format_fix_specifier)
7309 << FixedCS->toString()
7310 << FixItHint::CreateReplacement(CSRange, FixedCS->toString());
7311 } else {
7312 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
7313 << CS.toString() << /*conversion specifier*/1,
7314 getLocationOfByte(CS.getStart()),
7315 /*IsStringLocation*/true,
7316 getSpecifierRange(startSpecifier, specifierLen));
7317 }
7318}
7319
7320void CheckFormatHandler::HandlePosition(const char *startPos,
7321 unsigned posLen) {
7322 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_positional_arg),
7323 getLocationOfByte(startPos),
7324 /*IsStringLocation*/true,
7325 getSpecifierRange(startPos, posLen));
7326}
7327
7328void
7329CheckFormatHandler::HandleInvalidPosition(const char *startPos, unsigned posLen,
7330 analyze_format_string::PositionContext p) {
7331 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_positional_specifier)
7332 << (unsigned) p,
7333 getLocationOfByte(startPos), /*IsStringLocation*/true,
7334 getSpecifierRange(startPos, posLen));
7335}
7336
7337void CheckFormatHandler::HandleZeroPosition(const char *startPos,
7338 unsigned posLen) {
7339 EmitFormatDiagnostic(S.PDiag(diag::warn_format_zero_positional_specifier),
7340 getLocationOfByte(startPos),
7341 /*IsStringLocation*/true,
7342 getSpecifierRange(startPos, posLen));
7343}
7344
7345void CheckFormatHandler::HandleNullChar(const char *nullCharacter) {
7346 if (!isa<ObjCStringLiteral>(OrigFormatExpr)) {
7347 // The presence of a null character is likely an error.
7348 EmitFormatDiagnostic(
7349 S.PDiag(diag::warn_printf_format_string_contains_null_char),
7350 getLocationOfByte(nullCharacter), /*IsStringLocation*/true,
7351 getFormatStringRange());
7352 }
7353}
7354
7355// Note that this may return NULL if there was an error parsing or building
7356// one of the argument expressions.
7357const Expr *CheckFormatHandler::getDataArg(unsigned i) const {
7358 return Args[FirstDataArg + i];
7359}
7360
7361void CheckFormatHandler::DoneProcessing() {
7362 // Does the number of data arguments exceed the number of
7363 // format conversions in the format string?
7364 if (!HasVAListArg) {
7365 // Find any arguments that weren't covered.
7366 CoveredArgs.flip();
7367 signed notCoveredArg = CoveredArgs.find_first();
7368 if (notCoveredArg >= 0) {
7369 assert((unsigned)notCoveredArg < NumDataArgs);
7370 UncoveredArg.Update(notCoveredArg, OrigFormatExpr);
7371 } else {
7372 UncoveredArg.setAllCovered();
7373 }
7374 }
7375}
7376
7377void UncoveredArgHandler::Diagnose(Sema &S, bool IsFunctionCall,
7378 const Expr *ArgExpr) {
7379 assert(hasUncoveredArg() && DiagnosticExprs.size() > 0 &&
7380 "Invalid state");
7381
7382 if (!ArgExpr)
7383 return;
7384
7385 SourceLocation Loc = ArgExpr->getBeginLoc();
7386
7387 if (S.getSourceManager().isInSystemMacro(Loc))
7388 return;
7389
7390 PartialDiagnostic PDiag = S.PDiag(diag::warn_printf_data_arg_not_used);
7391 for (auto E : DiagnosticExprs)
7392 PDiag << E->getSourceRange();
7393
7394 CheckFormatHandler::EmitFormatDiagnostic(
7395 S, IsFunctionCall, DiagnosticExprs[0],
7396 PDiag, Loc, /*IsStringLocation*/false,
7397 DiagnosticExprs[0]->getSourceRange());
7398}
7399
// Diagnose an unrecognized conversion specifier. Returns true if parsing of
// the rest of the format string should continue, false if it should stop.
bool
CheckFormatHandler::HandleInvalidConversionSpecifier(unsigned argIndex,
                                                     SourceLocation Loc,
                                                     const char *startSpec,
                                                     unsigned specifierLen,
                                                     const char *csStart,
                                                     unsigned csLen) {
  bool keepGoing = true;
  if (argIndex < NumDataArgs) {
    // Consider the argument covered, even though the specifier doesn't
    // make sense.
    CoveredArgs.set(argIndex);
  }
  else {
    // If argIndex exceeds the number of data arguments we
    // don't issue a warning because that is just a cascade of warnings (and
    // they may have intended '%%' anyway). We don't want to continue processing
    // the format string after this point, however, as we will like just get
    // gibberish when trying to match arguments.
    keepGoing = false;
  }

  StringRef Specifier(csStart, csLen);

  // If the specifier is non-printable, it could be the first byte of a UTF-8
  // sequence. In that case, print the UTF-8 code point. If not, print the byte
  // hex value.
  std::string CodePointStr;
  if (!llvm::sys::locale::isPrint(*csStart)) {
    llvm::UTF32 CodePoint;
    // convertUTF8Sequence advances the source pointer; alias csStart through
    // B so the cursor moves with the decode.
    const llvm::UTF8 **B = reinterpret_cast<const llvm::UTF8 **>(&csStart);
    const llvm::UTF8 *E =
        reinterpret_cast<const llvm::UTF8 *>(csStart + csLen);
    llvm::ConversionResult Result =
        llvm::convertUTF8Sequence(B, E, &CodePoint, llvm::strictConversion);

    if (Result != llvm::conversionOK) {
      // Not valid UTF-8: fall back to showing the raw byte value.
      unsigned char FirstChar = *csStart;
      CodePoint = (llvm::UTF32)FirstChar;
    }

    // Render the code point as a \xNN, \uNNNN or \UNNNNNNNN escape depending
    // on its magnitude.
    llvm::raw_string_ostream OS(CodePointStr);
    if (CodePoint < 256)
      OS << "\\x" << llvm::format("%02x", CodePoint);
    else if (CodePoint <= 0xFFFF)
      OS << "\\u" << llvm::format("%04x", CodePoint);
    else
      OS << "\\U" << llvm::format("%08x", CodePoint);
    OS.flush();
    Specifier = CodePointStr;
  }

  EmitFormatDiagnostic(
      S.PDiag(diag::warn_format_invalid_conversion) << Specifier, Loc,
      /*IsStringLocation*/ true, getSpecifierRange(startSpec, specifierLen));

  return keepGoing;
}
7458
7459void
7460CheckFormatHandler::HandlePositionalNonpositionalArgs(SourceLocation Loc,
7461 const char *startSpec,
7462 unsigned specifierLen) {
7463 EmitFormatDiagnostic(
7464 S.PDiag(diag::warn_format_mix_positional_nonpositional_args),
7465 Loc, /*isStringLoc*/true, getSpecifierRange(startSpec, specifierLen));
7466}
7467
7468bool
7469CheckFormatHandler::CheckNumArgs(
7470 const analyze_format_string::FormatSpecifier &FS,
7471 const analyze_format_string::ConversionSpecifier &CS,
7472 const char *startSpecifier, unsigned specifierLen, unsigned argIndex) {
7473
7474 if (argIndex >= NumDataArgs) {
7475 PartialDiagnostic PDiag = FS.usesPositionalArg()
7476 ? (S.PDiag(diag::warn_printf_positional_arg_exceeds_data_args)
7477 << (argIndex+1) << NumDataArgs)
7478 : S.PDiag(diag::warn_printf_insufficient_data_args);
7479 EmitFormatDiagnostic(
7480 PDiag, getLocationOfByte(CS.getStart()), /*IsStringLocation*/true,
7481 getSpecifierRange(startSpecifier, specifierLen));
7482
7483 // Since more arguments than conversion tokens are given, by extension
7484 // all arguments are covered, so mark this as so.
7485 UncoveredArg.setAllCovered();
7486 return false;
7487 }
7488 return true;
7489}
7490
// Convenience overload: forwards to the static EmitFormatDiagnostic using
// this handler's stored call-site state (inFunctionCall flag and the format
// argument expression).
template<typename Range>
void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag,
                                              SourceLocation Loc,
                                              bool IsStringLocation,
                                              Range StringRange,
                                              ArrayRef<FixItHint> FixIt) {
  EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag,
                       Loc, IsStringLocation, StringRange, FixIt);
}
7500
/// If the format string is not within the function call, emit a note
/// so that the function call and string are in diagnostic messages.
///
/// \param InFunctionCall if true, the format string is within the function
/// call and only one diagnostic message will be produced. Otherwise, an
/// extra note will be emitted pointing to the location of the format string.
///
/// \param ArgumentExpr the expression that is passed as the format string
/// argument in the function call. Used for getting locations when two
/// diagnostics are emitted.
///
/// \param PDiag the callee should already have provided any strings for the
/// diagnostic message. This function only adds locations and fixits
/// to diagnostics.
///
/// \param Loc primary location for diagnostic. If two diagnostics are
/// required, one will be at Loc and a new SourceLocation will be created for
/// the other one.
///
/// \param IsStringLocation if true, Loc points into the format string and
/// should be used for the note; the primary diagnostic then points at the
/// argument expression. Otherwise, Loc points to the argument list and will
/// be used with PDiag.
///
/// \param StringRange some or all of the string to highlight. This is
/// templated so it can accept either a CharSourceRange or a SourceRange.
///
/// \param FixIt optional fix it hint for the format string.
template <typename Range>
void CheckFormatHandler::EmitFormatDiagnostic(
    Sema &S, bool InFunctionCall, const Expr *ArgumentExpr,
    const PartialDiagnostic &PDiag, SourceLocation Loc, bool IsStringLocation,
    Range StringRange, ArrayRef<FixItHint> FixIt) {
  if (InFunctionCall) {
    // Single diagnostic: attach the highlight range and fixits directly.
    // Note: binding to a const& relies on lifetime extension of the builder.
    const Sema::SemaDiagnosticBuilder &D = S.Diag(Loc, PDiag);
    D << StringRange;
    D << FixIt;
  } else {
    // Two diagnostics: the primary one points at the call's format argument,
    // and a note points into the separately defined format string.
    S.Diag(IsStringLocation ? ArgumentExpr->getExprLoc() : Loc, PDiag)
        << ArgumentExpr->getSourceRange();

    const Sema::SemaDiagnosticBuilder &Note =
        S.Diag(IsStringLocation ? Loc : StringRange.getBegin(),
               diag::note_format_string_defined);

    Note << StringRange;
    Note << FixIt;
  }
}
7549
7550//===--- CHECK: Printf format string checking ------------------------------===//
7551
namespace {

/// Format-string checker specialized for the printf family (including
/// NSString, os_log, os_trace, and FreeBSD kernel extensions).
class CheckPrintfHandler : public CheckFormatHandler {
public:
  CheckPrintfHandler(Sema &s, const FormatStringLiteral *fexpr,
                     const Expr *origFormatExpr,
                     const Sema::FormatStringType type, unsigned firstDataArg,
                     unsigned numDataArgs, bool isObjC, const char *beg,
                     bool hasVAListArg, ArrayRef<const Expr *> Args,
                     unsigned formatIdx, bool inFunctionCall,
                     Sema::VariadicCallType CallType,
                     llvm::SmallBitVector &CheckedVarArgs,
                     UncoveredArgHandler &UncoveredArg)
      : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
                           numDataArgs, beg, hasVAListArg, Args, formatIdx,
                           inFunctionCall, CallType, CheckedVarArgs,
                           UncoveredArg) {}

  /// Returns true when checking an NSString-format literal.
  bool isObjCContext() const { return FSType == Sema::FST_NSString; }

  /// Returns true if '%@' specifiers are allowed in the format string.
  bool allowsObjCArg() const {
    return FSType == Sema::FST_NSString || FSType == Sema::FST_OSLog ||
           FSType == Sema::FST_OSTrace;
  }

  bool HandleInvalidPrintfConversionSpecifier(
      const analyze_printf::PrintfSpecifier &FS,
      const char *startSpecifier,
      unsigned specifierLen) override;

  void handleInvalidMaskType(StringRef MaskType) override;

  /// Main per-specifier entry point; drives all the checks below.
  bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
                             const char *startSpecifier,
                             unsigned specifierLen) override;
  /// Type-checks the data argument against the specifier's expected type.
  bool checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
                       const char *StartSpecifier,
                       unsigned SpecifierLen,
                       const Expr *E);

  /// Checks a '*' field width (k == 0) or precision (k == 1) argument.
  bool HandleAmount(const analyze_format_string::OptionalAmount &Amt, unsigned k,
                    const char *startSpecifier, unsigned specifierLen);
  void HandleInvalidAmount(const analyze_printf::PrintfSpecifier &FS,
                           const analyze_printf::OptionalAmount &Amt,
                           unsigned type,
                           const char *startSpecifier, unsigned specifierLen);
  void HandleFlag(const analyze_printf::PrintfSpecifier &FS,
                  const analyze_printf::OptionalFlag &flag,
                  const char *startSpecifier, unsigned specifierLen);
  void HandleIgnoredFlag(const analyze_printf::PrintfSpecifier &FS,
                         const analyze_printf::OptionalFlag &ignoredFlag,
                         const analyze_printf::OptionalFlag &flag,
                         const char *startSpecifier, unsigned specifierLen);
  /// Suggests '.c_str()' when a string object was passed for a (w)char*.
  bool checkForCStrMembers(const analyze_printf::ArgType &AT,
                           const Expr *E);

  void HandleEmptyObjCModifierFlag(const char *startFlag,
                                   unsigned flagLen) override;

  void HandleInvalidObjCModifierFlag(const char *startFlag,
                                     unsigned flagLen) override;

  void HandleObjCFlagsWithNonObjCConversion(const char *flagsStart,
                                            const char *flagsEnd,
                                            const char *conversionPosition)
                                            override;
};

} // namespace
7622
7623bool CheckPrintfHandler::HandleInvalidPrintfConversionSpecifier(
7624 const analyze_printf::PrintfSpecifier &FS,
7625 const char *startSpecifier,
7626 unsigned specifierLen) {
7627 const analyze_printf::PrintfConversionSpecifier &CS =
7628 FS.getConversionSpecifier();
7629
7630 return HandleInvalidConversionSpecifier(FS.getArgIndex(),
7631 getLocationOfByte(CS.getStart()),
7632 startSpecifier, specifierLen,
7633 CS.getStart(), CS.getLength());
7634}
7635
7636void CheckPrintfHandler::handleInvalidMaskType(StringRef MaskType) {
7637 S.Diag(getLocationOfByte(MaskType.data()), diag::err_invalid_mask_type_size);
7638}
7639
7640bool CheckPrintfHandler::HandleAmount(
7641 const analyze_format_string::OptionalAmount &Amt,
7642 unsigned k, const char *startSpecifier,
7643 unsigned specifierLen) {
7644 if (Amt.hasDataArgument()) {
7645 if (!HasVAListArg) {
7646 unsigned argIndex = Amt.getArgIndex();
7647 if (argIndex >= NumDataArgs) {
7648 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_missing_arg)
7649 << k,
7650 getLocationOfByte(Amt.getStart()),
7651 /*IsStringLocation*/true,
7652 getSpecifierRange(startSpecifier, specifierLen));
7653 // Don't do any more checking. We will just emit
7654 // spurious errors.
7655 return false;
7656 }
7657
7658 // Type check the data argument. It should be an 'int'.
7659 // Although not in conformance with C99, we also allow the argument to be
7660 // an 'unsigned int' as that is a reasonably safe case. GCC also
7661 // doesn't emit a warning for that case.
7662 CoveredArgs.set(argIndex);
7663 const Expr *Arg = getDataArg(argIndex);
7664 if (!Arg)
7665 return false;
7666
7667 QualType T = Arg->getType();
7668
7669 const analyze_printf::ArgType &AT = Amt.getArgType(S.Context);
7670 assert(AT.isValid());
7671
7672 if (!AT.matchesType(S.Context, T)) {
7673 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_wrong_type)
7674 << k << AT.getRepresentativeTypeName(S.Context)
7675 << T << Arg->getSourceRange(),
7676 getLocationOfByte(Amt.getStart()),
7677 /*IsStringLocation*/true,
7678 getSpecifierRange(startSpecifier, specifierLen));
7679 // Don't do any more checking. We will just emit
7680 // spurious errors.
7681 return false;
7682 }
7683 }
7684 }
7685 return true;
7686}
7687
7688void CheckPrintfHandler::HandleInvalidAmount(
7689 const analyze_printf::PrintfSpecifier &FS,
7690 const analyze_printf::OptionalAmount &Amt,
7691 unsigned type,
7692 const char *startSpecifier,
7693 unsigned specifierLen) {
7694 const analyze_printf::PrintfConversionSpecifier &CS =
7695 FS.getConversionSpecifier();
7696
7697 FixItHint fixit =
7698 Amt.getHowSpecified() == analyze_printf::OptionalAmount::Constant
7699 ? FixItHint::CreateRemoval(getSpecifierRange(Amt.getStart(),
7700 Amt.getConstantLength()))
7701 : FixItHint();
7702
7703 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_optional_amount)
7704 << type << CS.toString(),
7705 getLocationOfByte(Amt.getStart()),
7706 /*IsStringLocation*/true,
7707 getSpecifierRange(startSpecifier, specifierLen),
7708 fixit);
7709}
7710
7711void CheckPrintfHandler::HandleFlag(const analyze_printf::PrintfSpecifier &FS,
7712 const analyze_printf::OptionalFlag &flag,
7713 const char *startSpecifier,
7714 unsigned specifierLen) {
7715 // Warn about pointless flag with a fixit removal.
7716 const analyze_printf::PrintfConversionSpecifier &CS =
7717 FS.getConversionSpecifier();
7718 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_flag)
7719 << flag.toString() << CS.toString(),
7720 getLocationOfByte(flag.getPosition()),
7721 /*IsStringLocation*/true,
7722 getSpecifierRange(startSpecifier, specifierLen),
7723 FixItHint::CreateRemoval(
7724 getSpecifierRange(flag.getPosition(), 1)));
7725}
7726
7727void CheckPrintfHandler::HandleIgnoredFlag(
7728 const analyze_printf::PrintfSpecifier &FS,
7729 const analyze_printf::OptionalFlag &ignoredFlag,
7730 const analyze_printf::OptionalFlag &flag,
7731 const char *startSpecifier,
7732 unsigned specifierLen) {
7733 // Warn about ignored flag with a fixit removal.
7734 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_ignored_flag)
7735 << ignoredFlag.toString() << flag.toString(),
7736 getLocationOfByte(ignoredFlag.getPosition()),
7737 /*IsStringLocation*/true,
7738 getSpecifierRange(startSpecifier, specifierLen),
7739 FixItHint::CreateRemoval(
7740 getSpecifierRange(ignoredFlag.getPosition(), 1)));
7741}
7742
7743void CheckPrintfHandler::HandleEmptyObjCModifierFlag(const char *startFlag,
7744 unsigned flagLen) {
7745 // Warn about an empty flag.
7746 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_empty_objc_flag),
7747 getLocationOfByte(startFlag),
7748 /*IsStringLocation*/true,
7749 getSpecifierRange(startFlag, flagLen));
7750}
7751
7752void CheckPrintfHandler::HandleInvalidObjCModifierFlag(const char *startFlag,
7753 unsigned flagLen) {
7754 // Warn about an invalid flag.
7755 auto Range = getSpecifierRange(startFlag, flagLen);
7756 StringRef flag(startFlag, flagLen);
7757 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_invalid_objc_flag) << flag,
7758 getLocationOfByte(startFlag),
7759 /*IsStringLocation*/true,
7760 Range, FixItHint::CreateRemoval(Range));
7761}
7762
7763void CheckPrintfHandler::HandleObjCFlagsWithNonObjCConversion(
7764 const char *flagsStart, const char *flagsEnd, const char *conversionPosition) {
7765 // Warn about using '[...]' without a '@' conversion.
7766 auto Range = getSpecifierRange(flagsStart, flagsEnd - flagsStart + 1);
7767 auto diag = diag::warn_printf_ObjCflags_without_ObjCConversion;
7768 EmitFormatDiagnostic(S.PDiag(diag) << StringRef(conversionPosition, 1),
7769 getLocationOfByte(conversionPosition),
7770 /*IsStringLocation*/true,
7771 Range, FixItHint::CreateRemoval(Range));
7772}
7773
7774// Determines if the specified is a C++ class or struct containing
7775// a member with the specified name and kind (e.g. a CXXMethodDecl named
7776// "c_str()").
7777template<typename MemberKind>
7778static llvm::SmallPtrSet<MemberKind*, 1>
7779CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) {
7780 const RecordType *RT = Ty->getAs<RecordType>();
7781 llvm::SmallPtrSet<MemberKind*, 1> Results;
7782
7783 if (!RT)
7784 return Results;
7785 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
7786 if (!RD || !RD->getDefinition())
7787 return Results;
7788
7789 LookupResult R(S, &S.Context.Idents.get(Name), SourceLocation(),
7790 Sema::LookupMemberName);
7791 R.suppressDiagnostics();
7792
7793 // We just need to include all members of the right kind turned up by the
7794 // filter, at this point.
7795 if (S.LookupQualifiedName(R, RT->getDecl()))
7796 for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) {
7797 NamedDecl *decl = (*I)->getUnderlyingDecl();
7798 if (MemberKind *FK = dyn_cast<MemberKind>(decl))
7799 Results.insert(FK);
7800 }
7801 return Results;
7802}
7803
7804/// Check if we could call '.c_str()' on an object.
7805///
7806/// FIXME: This returns the wrong results in some cases (if cv-qualifiers don't
7807/// allow the call, or if it would be ambiguous).
7808bool Sema::hasCStrMethod(const Expr *E) {
7809 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>;
7810
7811 MethodSet Results =
7812 CXXRecordMembersNamed<CXXMethodDecl>("c_str", *this, E->getType());
7813 for (MethodSet::iterator MI = Results.begin(), ME = Results.end();
7814 MI != ME; ++MI)
7815 if ((*MI)->getMinRequiredArguments() == 0)
7816 return true;
7817 return false;
7818}
7819
7820// Check if a (w)string was passed when a (w)char* was needed, and offer a
7821// better diagnostic if so. AT is assumed to be valid.
7822// Returns true when a c_str() conversion method is found.
7823bool CheckPrintfHandler::checkForCStrMembers(
7824 const analyze_printf::ArgType &AT, const Expr *E) {
7825 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>;
7826
7827 MethodSet Results =
7828 CXXRecordMembersNamed<CXXMethodDecl>("c_str", S, E->getType());
7829
7830 for (MethodSet::iterator MI = Results.begin(), ME = Results.end();
7831 MI != ME; ++MI) {
7832 const CXXMethodDecl *Method = *MI;
7833 if (Method->getMinRequiredArguments() == 0 &&
7834 AT.matchesType(S.Context, Method->getReturnType())) {
7835 // FIXME: Suggest parens if the expression needs them.
7836 SourceLocation EndLoc = S.getLocForEndOfToken(E->getEndLoc());
7837 S.Diag(E->getBeginLoc(), diag::note_printf_c_str)
7838 << "c_str()" << FixItHint::CreateInsertion(EndLoc, ".c_str()");
7839 return true;
7840 }
7841 }
7842
7843 return false;
7844}
7845
/// Validate a single printf conversion specifier: positional consistency,
/// '*' width/precision arguments, platform-specific conversions (FreeBSD
/// %b/%D, os_log %P, os_trace restrictions), flag validity and flag
/// interactions, length modifiers, and finally the type of the matching
/// data argument (via checkFormatExpr).
///
/// \returns false to stop scanning the format string, true to continue.
bool
CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier
                                          &FS,
                                          const char *startSpecifier,
                                          unsigned specifierLen) {
  using namespace analyze_format_string;
  using namespace analyze_printf;

  const PrintfConversionSpecifier &CS = FS.getConversionSpecifier();

  if (FS.consumesDataArgument()) {
    // Record the addressing style of the first specifier and diagnose any
    // later specifier that mixes positional with non-positional addressing.
    if (atFirstArg) {
      atFirstArg = false;
      usesPositionalArgs = FS.usesPositionalArg();
    }
    else if (usesPositionalArgs != FS.usesPositionalArg()) {
      HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()),
                                        startSpecifier, specifierLen);
      return false;
    }
  }

  // First check if the field width, precision, and conversion specifier
  // have matching data arguments.
  if (!HandleAmount(FS.getFieldWidth(), /* field width */ 0,
                    startSpecifier, specifierLen)) {
    return false;
  }

  if (!HandleAmount(FS.getPrecision(), /* precision */ 1,
                    startSpecifier, specifierLen)) {
    return false;
  }

  if (!CS.consumesDataArgument()) {
    // FIXME: Technically specifying a precision or field width here
    // makes no sense.  Worth issuing a warning at some point.
    return true;
  }

  // Consume the argument.
  unsigned argIndex = FS.getArgIndex();
  if (argIndex < NumDataArgs) {
    // The check to see if the argIndex is valid will come later.
    // We set the bit here because we may exit early from this
    // function if we encounter some other error.
    CoveredArgs.set(argIndex);
  }

  // FreeBSD kernel extensions.
  if (CS.getKind() == ConversionSpecifier::FreeBSDbArg ||
      CS.getKind() == ConversionSpecifier::FreeBSDDArg) {
    // We need at least two arguments.
    if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex + 1))
      return false;

    // Claim the second argument.
    CoveredArgs.set(argIndex + 1);

    // Type check the first argument (int for %b, pointer for %D)
    const Expr *Ex = getDataArg(argIndex);
    const analyze_printf::ArgType &AT =
      (CS.getKind() == ConversionSpecifier::FreeBSDbArg) ?
        ArgType(S.Context.IntTy) : ArgType::CPointerTy;
    if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType()))
      EmitFormatDiagnostic(
          S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
              << AT.getRepresentativeTypeName(S.Context) << Ex->getType()
              << false << Ex->getSourceRange(),
          Ex->getBeginLoc(), /*IsStringLocation*/ false,
          getSpecifierRange(startSpecifier, specifierLen));

    // Type check the second argument (char * for both %b and %D)
    Ex = getDataArg(argIndex + 1);
    const analyze_printf::ArgType &AT2 = ArgType::CStrTy;
    if (AT2.isValid() && !AT2.matchesType(S.Context, Ex->getType()))
      EmitFormatDiagnostic(
          S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
              << AT2.getRepresentativeTypeName(S.Context) << Ex->getType()
              << false << Ex->getSourceRange(),
          Ex->getBeginLoc(), /*IsStringLocation*/ false,
          getSpecifierRange(startSpecifier, specifierLen));

    return true;
  }

  // Check for using an Objective-C specific conversion specifier
  // in a non-ObjC literal.
  if (!allowsObjCArg() && CS.isObjCArg()) {
    return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
                                                  specifierLen);
  }

  // %P can only be used with os_log.
  if (FSType != Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::PArg) {
    return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
                                                  specifierLen);
  }

  // %n is not allowed with os_log.
  if (FSType == Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::nArg) {
    EmitFormatDiagnostic(S.PDiag(diag::warn_os_log_format_narg),
                         getLocationOfByte(CS.getStart()),
                         /*IsStringLocation*/ false,
                         getSpecifierRange(startSpecifier, specifierLen));

    return true;
  }

  // Only scalars are allowed for os_trace.
  if (FSType == Sema::FST_OSTrace &&
      (CS.getKind() == ConversionSpecifier::PArg ||
       CS.getKind() == ConversionSpecifier::sArg ||
       CS.getKind() == ConversionSpecifier::ObjCObjArg)) {
    return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
                                                  specifierLen);
  }

  // Check for use of public/private annotation outside of os_log().
  if (FSType != Sema::FST_OSLog) {
    if (FS.isPublic().isSet()) {
      EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation)
                               << "public",
                           getLocationOfByte(FS.isPublic().getPosition()),
                           /*IsStringLocation*/ false,
                           getSpecifierRange(startSpecifier, specifierLen));
    }
    if (FS.isPrivate().isSet()) {
      EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation)
                               << "private",
                           getLocationOfByte(FS.isPrivate().getPosition()),
                           /*IsStringLocation*/ false,
                           getSpecifierRange(startSpecifier, specifierLen));
    }
  }

  // Check for invalid use of field width
  if (!FS.hasValidFieldWidth()) {
    HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0,
        startSpecifier, specifierLen);
  }

  // Check for invalid use of precision
  if (!FS.hasValidPrecision()) {
    HandleInvalidAmount(FS, FS.getPrecision(), /* precision */ 1,
        startSpecifier, specifierLen);
  }

  // Precision is mandatory for %P specifier.
  if (CS.getKind() == ConversionSpecifier::PArg &&
      FS.getPrecision().getHowSpecified() == OptionalAmount::NotSpecified) {
    EmitFormatDiagnostic(S.PDiag(diag::warn_format_P_no_precision),
                         getLocationOfByte(startSpecifier),
                         /*IsStringLocation*/ false,
                         getSpecifierRange(startSpecifier, specifierLen));
  }

  // Check each flag does not conflict with any other component.
  if (!FS.hasValidThousandsGroupingPrefix())
    HandleFlag(FS, FS.hasThousandsGrouping(), startSpecifier, specifierLen);
  if (!FS.hasValidLeadingZeros())
    HandleFlag(FS, FS.hasLeadingZeros(), startSpecifier, specifierLen);
  if (!FS.hasValidPlusPrefix())
    HandleFlag(FS, FS.hasPlusPrefix(), startSpecifier, specifierLen);
  if (!FS.hasValidSpacePrefix())
    HandleFlag(FS, FS.hasSpacePrefix(), startSpecifier, specifierLen);
  if (!FS.hasValidAlternativeForm())
    HandleFlag(FS, FS.hasAlternativeForm(), startSpecifier, specifierLen);
  if (!FS.hasValidLeftJustified())
    HandleFlag(FS, FS.isLeftJustified(), startSpecifier, specifierLen);

  // Check that flags are not ignored by another flag
  if (FS.hasSpacePrefix() && FS.hasPlusPrefix()) // ' ' ignored by '+'
    HandleIgnoredFlag(FS, FS.hasSpacePrefix(), FS.hasPlusPrefix(),
        startSpecifier, specifierLen);
  if (FS.hasLeadingZeros() && FS.isLeftJustified()) // '0' ignored by '-'
    HandleIgnoredFlag(FS, FS.hasLeadingZeros(), FS.isLeftJustified(),
            startSpecifier, specifierLen);

  // Check the length modifier is valid with the given conversion specifier.
  if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(),
                                 S.getLangOpts()))
    HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
                                diag::warn_format_nonsensical_length);
  else if (!FS.hasStandardLengthModifier())
    HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen);
  else if (!FS.hasStandardLengthConversionCombination())
    HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
                                diag::warn_format_non_standard_conversion_spec);

  if (!FS.hasStandardConversionSpecifier(S.getLangOpts()))
    HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen);

  // The remaining checks depend on the data arguments.
  if (HasVAListArg)
    return true;

  if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex))
    return false;

  const Expr *Arg = getDataArg(argIndex);
  if (!Arg)
    return true;

  return checkFormatExpr(FS, startSpecifier, specifierLen, Arg);
}
8052
/// Returns true if inserting a C-style cast in front of \p E would need
/// parentheses around the expression to bind correctly; returns false for
/// primary expressions and a few other high-precedence forms.
static bool requiresParensToAddCast(const Expr *E) {
  // FIXME: We should have a general way to reason about operator
  // precedence and whether parens are actually needed here.
  // Take care of a few common cases where they aren't.
  const Expr *Inside = E->IgnoreImpCasts();
  // Property accesses and the like are represented by a PseudoObjectExpr;
  // judge precedence from the syntactic form the user actually wrote.
  if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(Inside))
    Inside = POE->getSyntacticForm()->IgnoreImpCasts();

  switch (Inside->getStmtClass()) {
  case Stmt::ArraySubscriptExprClass:
  case Stmt::CallExprClass:
  case Stmt::CharacterLiteralClass:
  case Stmt::CXXBoolLiteralExprClass:
  case Stmt::DeclRefExprClass:
  case Stmt::FloatingLiteralClass:
  case Stmt::IntegerLiteralClass:
  case Stmt::MemberExprClass:
  case Stmt::ObjCArrayLiteralClass:
  case Stmt::ObjCBoolLiteralExprClass:
  case Stmt::ObjCBoxedExprClass:
  case Stmt::ObjCDictionaryLiteralClass:
  case Stmt::ObjCEncodeExprClass:
  case Stmt::ObjCIvarRefExprClass:
  case Stmt::ObjCMessageExprClass:
  case Stmt::ObjCPropertyRefExprClass:
  case Stmt::ObjCStringLiteralClass:
  case Stmt::ObjCSubscriptRefExprClass:
  case Stmt::ParenExprClass:
  case Stmt::StringLiteralClass:
  case Stmt::UnaryOperatorClass:
    return false;
  default:
    return true;
  }
}
8088
/// Determine whether \p IntendedTy is one of Darwin's platform-independence
/// typedefs (NSInteger, CFIndex, SInt32, ...) that should be cast to a
/// fixed primitive type before printing.
///
/// \returns the (cast-to type, typedef name) pair to suggest, or a
/// (null QualType, empty StringRef) pair when direct printing is fine.
static std::pair<QualType, StringRef>
shouldNotPrintDirectly(const ASTContext &Context,
                       QualType IntendedTy,
                       const Expr *E) {
  // Use a 'while' to peel off layers of typedefs.
  QualType TyTy = IntendedTy;
  while (const TypedefType *UserTy = TyTy->getAs<TypedefType>()) {
    StringRef Name = UserTy->getDecl()->getName();
    // Map each known typedef name to the primitive type known to be large
    // enough on all Darwin platforms.
    QualType CastTy = llvm::StringSwitch<QualType>(Name)
      .Case("CFIndex", Context.getNSIntegerType())
      .Case("NSInteger", Context.getNSIntegerType())
      .Case("NSUInteger", Context.getNSUIntegerType())
      .Case("SInt32", Context.IntTy)
      .Case("UInt32", Context.UnsignedIntTy)
      .Default(QualType());

    if (!CastTy.isNull())
      return std::make_pair(CastTy, Name);

    TyTy = UserTy->desugar();
  }

  // Strip parens if necessary.
  if (const ParenExpr *PE = dyn_cast<ParenExpr>(E))
    return shouldNotPrintDirectly(Context,
                                  PE->getSubExpr()->getType(),
                                  PE->getSubExpr());

  // If this is a conditional expression, then its result type is constructed
  // via usual arithmetic conversions and thus there might be no necessary
  // typedef sugar there.  Recurse to operands to check for NSInteger &
  // Co. usage condition.
  if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) {
    QualType TrueTy, FalseTy;
    StringRef TrueName, FalseName;

    std::tie(TrueTy, TrueName) =
      shouldNotPrintDirectly(Context,
                             CO->getTrueExpr()->getType(),
                             CO->getTrueExpr());
    std::tie(FalseTy, FalseName) =
      shouldNotPrintDirectly(Context,
                             CO->getFalseExpr()->getType(),
                             CO->getFalseExpr());

    // Merge the two arms: agree exactly, or take whichever arm matched.
    if (TrueTy == FalseTy)
      return std::make_pair(TrueTy, TrueName);
    else if (TrueTy.isNull())
      return std::make_pair(FalseTy, FalseName);
    else if (FalseTy.isNull())
      return std::make_pair(TrueTy, TrueName);
  }

  return std::make_pair(QualType(), StringRef());
}
8144
8145/// Return true if \p ICE is an implicit argument promotion of an arithmetic
8146/// type. Bit-field 'promotions' from a higher ranked type to a lower ranked
8147/// type do not count.
8148static bool
8149isArithmeticArgumentPromotion(Sema &S, const ImplicitCastExpr *ICE) {
8150 QualType From = ICE->getSubExpr()->getType();
8151 QualType To = ICE->getType();
8152 // It's an integer promotion if the destination type is the promoted
8153 // source type.
8154 if (ICE->getCastKind() == CK_IntegralCast &&
8155 From->isPromotableIntegerType() &&
8156 S.Context.getPromotedIntegerType(From) == To)
8157 return true;
8158 // Look through vector types, since we do default argument promotion for
8159 // those in OpenCL.
8160 if (const auto *VecTy = From->getAs<ExtVectorType>())
8161 From = VecTy->getElementType();
8162 if (const auto *VecTy = To->getAs<ExtVectorType>())
8163 To = VecTy->getElementType();
8164 // It's a floating promotion if the source type is a lower rank.
8165 return ICE->getCastKind() == CK_FloatingCast &&
8166 S.Context.getFloatingTypeOrder(From, To) < 0;
8167}
8168
8169bool
8170CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
8171 const char *StartSpecifier,
8172 unsigned SpecifierLen,
8173 const Expr *E) {
8174 using namespace analyze_format_string;
8175 using namespace analyze_printf;
8176
8177 // Now type check the data expression that matches the
8178 // format specifier.
8179 const analyze_printf::ArgType &AT = FS.getArgType(S.Context, isObjCContext());
8180 if (!AT.isValid())
8181 return true;
8182
8183 QualType ExprTy = E->getType();
8184 while (const TypeOfExprType *TET = dyn_cast<TypeOfExprType>(ExprTy)) {
8185 ExprTy = TET->getUnderlyingExpr()->getType();
8186 }
8187
8188 const analyze_printf::ArgType::MatchKind Match =
8189 AT.matchesType(S.Context, ExprTy);
8190 bool Pedantic = Match == analyze_printf::ArgType::NoMatchPedantic;
8191 if (Match == analyze_printf::ArgType::Match)
8192 return true;
8193
8194 // Look through argument promotions for our error message's reported type.
8195 // This includes the integral and floating promotions, but excludes array
8196 // and function pointer decay (seeing that an argument intended to be a
8197 // string has type 'char [6]' is probably more confusing than 'char *') and
8198 // certain bitfield promotions (bitfields can be 'demoted' to a lesser type).
8199 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
8200 if (isArithmeticArgumentPromotion(S, ICE)) {
8201 E = ICE->getSubExpr();
8202 ExprTy = E->getType();
8203
8204 // Check if we didn't match because of an implicit cast from a 'char'
8205 // or 'short' to an 'int'. This is done because printf is a varargs
8206 // function.
8207 if (ICE->getType() == S.Context.IntTy ||
8208 ICE->getType() == S.Context.UnsignedIntTy) {
8209 // All further checking is done on the subexpression.
8210 if (AT.matchesType(S.Context, ExprTy))
8211 return true;
8212 }
8213 }
8214 } else if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E)) {
8215 // Special case for 'a', which has type 'int' in C.
8216 // Note, however, that we do /not/ want to treat multibyte constants like
8217 // 'MooV' as characters! This form is deprecated but still exists.
8218 if (ExprTy == S.Context.IntTy)
8219 if (llvm::isUIntN(S.Context.getCharWidth(), CL->getValue()))
8220 ExprTy = S.Context.CharTy;
8221 }
8222
8223 // Look through enums to their underlying type.
8224 bool IsEnum = false;
8225 if (auto EnumTy = ExprTy->getAs<EnumType>()) {
8226 ExprTy = EnumTy->getDecl()->getIntegerType();
8227 IsEnum = true;
8228 }
8229
8230 // %C in an Objective-C context prints a unichar, not a wchar_t.
8231 // If the argument is an integer of some kind, believe the %C and suggest
8232 // a cast instead of changing the conversion specifier.
8233 QualType IntendedTy = ExprTy;
8234 if (isObjCContext() &&
8235 FS.getConversionSpecifier().getKind() == ConversionSpecifier::CArg) {
8236 if (ExprTy->isIntegralOrUnscopedEnumerationType() &&
8237 !ExprTy->isCharType()) {
8238 // 'unichar' is defined as a typedef of unsigned short, but we should
8239 // prefer using the typedef if it is visible.
8240 IntendedTy = S.Context.UnsignedShortTy;
8241
8242 // While we are here, check if the value is an IntegerLiteral that happens
8243 // to be within the valid range.
8244 if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E)) {
8245 const llvm::APInt &V = IL->getValue();
8246 if (V.getActiveBits() <= S.Context.getTypeSize(IntendedTy))
8247 return true;
8248 }
8249
8250 LookupResult Result(S, &S.Context.Idents.get("unichar"), E->getBeginLoc(),
8251 Sema::LookupOrdinaryName);
8252 if (S.LookupName(Result, S.getCurScope())) {
8253 NamedDecl *ND = Result.getFoundDecl();
8254 if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(ND))
8255 if (TD->getUnderlyingType() == IntendedTy)
8256 IntendedTy = S.Context.getTypedefType(TD);
8257 }
8258 }
8259 }
8260
8261 // Special-case some of Darwin's platform-independence types by suggesting
8262 // casts to primitive types that are known to be large enough.
8263 bool ShouldNotPrintDirectly = false; StringRef CastTyName;
8264 if (S.Context.getTargetInfo().getTriple().isOSDarwin()) {
8265 QualType CastTy;
8266 std::tie(CastTy, CastTyName) = shouldNotPrintDirectly(S.Context, IntendedTy, E);
8267 if (!CastTy.isNull()) {
8268 // %zi/%zu and %td/%tu are OK to use for NSInteger/NSUInteger of type int
8269 // (long in ASTContext). Only complain to pedants.
8270 if ((CastTyName == "NSInteger" || CastTyName == "NSUInteger") &&
8271 (AT.isSizeT() || AT.isPtrdiffT()) &&
8272 AT.matchesType(S.Context, CastTy))
8273 Pedantic = true;
8274 IntendedTy = CastTy;
8275 ShouldNotPrintDirectly = true;
8276 }
8277 }
8278
8279 // We may be able to offer a FixItHint if it is a supported type.
8280 PrintfSpecifier fixedFS = FS;
8281 bool Success =
8282 fixedFS.fixType(IntendedTy, S.getLangOpts(), S.Context, isObjCContext());
8283
8284 if (Success) {
8285 // Get the fix string from the fixed format specifier
8286 SmallString<16> buf;
8287 llvm::raw_svector_ostream os(buf);
8288 fixedFS.toString(os);
8289
8290 CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen);
8291
8292 if (IntendedTy == ExprTy && !ShouldNotPrintDirectly) {
8293 unsigned Diag =
8294 Pedantic
8295 ? diag::warn_format_conversion_argument_type_mismatch_pedantic
8296 : diag::warn_format_conversion_argument_type_mismatch;
8297 // In this case, the specifier is wrong and should be changed to match
8298 // the argument.
8299 EmitFormatDiagnostic(S.PDiag(Diag)
8300 << AT.getRepresentativeTypeName(S.Context)
8301 << IntendedTy << IsEnum << E->getSourceRange(),
8302 E->getBeginLoc(),
8303 /*IsStringLocation*/ false, SpecRange,
8304 FixItHint::CreateReplacement(SpecRange, os.str()));
8305 } else {
8306 // The canonical type for formatting this value is different from the
8307 // actual type of the expression. (This occurs, for example, with Darwin's
8308 // NSInteger on 32-bit platforms, where it is typedef'd as 'int', but
8309 // should be printed as 'long' for 64-bit compatibility.)
8310 // Rather than emitting a normal format/argument mismatch, we want to
8311 // add a cast to the recommended type (and correct the format string
8312 // if necessary).
8313 SmallString<16> CastBuf;
8314 llvm::raw_svector_ostream CastFix(CastBuf);
8315 CastFix << "(";
8316 IntendedTy.print(CastFix, S.Context.getPrintingPolicy());
8317 CastFix << ")";
8318
8319 SmallVector<FixItHint,4> Hints;
8320 if (!AT.matchesType(S.Context, IntendedTy) || ShouldNotPrintDirectly)
8321 Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str()));
8322
8323 if (const CStyleCastExpr *CCast = dyn_cast<CStyleCastExpr>(E)) {
8324 // If there's already a cast present, just replace it.
8325 SourceRange CastRange(CCast->getLParenLoc(), CCast->getRParenLoc());
8326 Hints.push_back(FixItHint::CreateReplacement(CastRange, CastFix.str()));
8327
8328 } else if (!requiresParensToAddCast(E)) {
8329 // If the expression has high enough precedence,
8330 // just write the C-style cast.
8331 Hints.push_back(
8332 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str()));
8333 } else {
8334 // Otherwise, add parens around the expression as well as the cast.
8335 CastFix << "(";
8336 Hints.push_back(
8337 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str()));
8338
8339 SourceLocation After = S.getLocForEndOfToken(E->getEndLoc());
8340 Hints.push_back(FixItHint::CreateInsertion(After, ")"));
8341 }
8342
8343 if (ShouldNotPrintDirectly) {
8344 // The expression has a type that should not be printed directly.
8345 // We extract the name from the typedef because we don't want to show
8346 // the underlying type in the diagnostic.
8347 StringRef Name;
8348 if (const TypedefType *TypedefTy = dyn_cast<TypedefType>(ExprTy))
8349 Name = TypedefTy->getDecl()->getName();
8350 else
8351 Name = CastTyName;
8352 unsigned Diag = Pedantic
8353 ? diag::warn_format_argument_needs_cast_pedantic
8354 : diag::warn_format_argument_needs_cast;
8355 EmitFormatDiagnostic(S.PDiag(Diag) << Name << IntendedTy << IsEnum
8356 << E->getSourceRange(),
8357 E->getBeginLoc(), /*IsStringLocation=*/false,
8358 SpecRange, Hints);
8359 } else {
8360 // In this case, the expression could be printed using a different
8361 // specifier, but we've decided that the specifier is probably correct
8362 // and we should cast instead. Just use the normal warning message.
8363 EmitFormatDiagnostic(
8364 S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
8365 << AT.getRepresentativeTypeName(S.Context) << ExprTy << IsEnum
8366 << E->getSourceRange(),
8367 E->getBeginLoc(), /*IsStringLocation*/ false, SpecRange, Hints);
8368 }
8369 }
8370 } else {
8371 const CharSourceRange &CSR = getSpecifierRange(StartSpecifier,
8372 SpecifierLen);
8373 // Since the warning for passing non-POD types to variadic functions
8374 // was deferred until now, we emit a warning for non-POD
8375 // arguments here.
8376 switch (S.isValidVarArgType(ExprTy)) {
8377 case Sema::VAK_Valid:
8378 case Sema::VAK_ValidInCXX11: {
8379 unsigned Diag =
8380 Pedantic
8381 ? diag::warn_format_conversion_argument_type_mismatch_pedantic
8382 : diag::warn_format_conversion_argument_type_mismatch;
8383
8384 EmitFormatDiagnostic(
8385 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) << ExprTy
8386 << IsEnum << CSR << E->getSourceRange(),
8387 E->getBeginLoc(), /*IsStringLocation*/ false, CSR);
8388 break;
8389 }
8390 case Sema::VAK_Undefined:
8391 case Sema::VAK_MSVCUndefined:
8392 EmitFormatDiagnostic(S.PDiag(diag::warn_non_pod_vararg_with_format_string)
8393 << S.getLangOpts().CPlusPlus11 << ExprTy
8394 << CallType
8395 << AT.getRepresentativeTypeName(S.Context) << CSR
8396 << E->getSourceRange(),
8397 E->getBeginLoc(), /*IsStringLocation*/ false, CSR);
8398 checkForCStrMembers(AT, E);
8399 break;
8400
8401 case Sema::VAK_Invalid:
8402 if (ExprTy->isObjCObjectType())
8403 EmitFormatDiagnostic(
8404 S.PDiag(diag::err_cannot_pass_objc_interface_to_vararg_format)
8405 << S.getLangOpts().CPlusPlus11 << ExprTy << CallType
8406 << AT.getRepresentativeTypeName(S.Context) << CSR
8407 << E->getSourceRange(),
8408 E->getBeginLoc(), /*IsStringLocation*/ false, CSR);
8409 else
8410 // FIXME: If this is an initializer list, suggest removing the braces
8411 // or inserting a cast to the target type.
8412 S.Diag(E->getBeginLoc(), diag::err_cannot_pass_to_vararg_format)
8413 << isa<InitListExpr>(E) << ExprTy << CallType
8414 << AT.getRepresentativeTypeName(S.Context) << E->getSourceRange();
8415 break;
8416 }
8417
8418 assert(FirstDataArg + FS.getArgIndex() < CheckedVarArgs.size() &&
8419 "format string specifier index out of range");
8420 CheckedVarArgs[FirstDataArg + FS.getArgIndex()] = true;
8421 }
8422
8423 return true;
8424}
8425
8426//===--- CHECK: Scanf format string checking ------------------------------===//
8427
8428namespace {
8429
/// Format-string checker for the scanf family.  Receives callbacks from the
/// scanf format-string parser and validates each conversion specifier
/// against the corresponding data argument.
class CheckScanfHandler : public CheckFormatHandler {
public:
  CheckScanfHandler(Sema &s, const FormatStringLiteral *fexpr,
                    const Expr *origFormatExpr, Sema::FormatStringType type,
                    unsigned firstDataArg, unsigned numDataArgs,
                    const char *beg, bool hasVAListArg,
                    ArrayRef<const Expr *> Args, unsigned formatIdx,
                    bool inFunctionCall, Sema::VariadicCallType CallType,
                    llvm::SmallBitVector &CheckedVarArgs,
                    UncoveredArgHandler &UncoveredArg)
      : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
                           numDataArgs, beg, hasVAListArg, Args, formatIdx,
                           inFunctionCall, CallType, CheckedVarArgs,
                           UncoveredArg) {}

  // Called for each syntactically valid scanf conversion specifier.
  bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
                            const char *startSpecifier,
                            unsigned specifierLen) override;

  // Called when the conversion character is not a valid scanf conversion.
  bool HandleInvalidScanfConversionSpecifier(
      const analyze_scanf::ScanfSpecifier &FS,
      const char *startSpecifier,
      unsigned specifierLen) override;

  // Called when a '[' scanlist is not terminated by ']'.
  void HandleIncompleteScanList(const char *start, const char *end) override;
};
8456
8457} // namespace
8458
8459void CheckScanfHandler::HandleIncompleteScanList(const char *start,
8460 const char *end) {
8461 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_scanlist_incomplete),
8462 getLocationOfByte(end), /*IsStringLocation*/true,
8463 getSpecifierRange(start, end - start));
8464}
8465
8466bool CheckScanfHandler::HandleInvalidScanfConversionSpecifier(
8467 const analyze_scanf::ScanfSpecifier &FS,
8468 const char *startSpecifier,
8469 unsigned specifierLen) {
8470 const analyze_scanf::ScanfConversionSpecifier &CS =
8471 FS.getConversionSpecifier();
8472
8473 return HandleInvalidConversionSpecifier(FS.getArgIndex(),
8474 getLocationOfByte(CS.getStart()),
8475 startSpecifier, specifierLen,
8476 CS.getStart(), CS.getLength());
8477}
8478
// Checks a single scanf conversion specifier: positional-argument
// consistency, zero field widths, length modifiers, conversion-specifier
// standardness, and whether the corresponding data argument's type matches
// the specifier (emitting a fixit to correct the specifier when possible).
// Returns false to abort parsing of the remaining format string.
bool CheckScanfHandler::HandleScanfSpecifier(
    const analyze_scanf::ScanfSpecifier &FS,
    const char *startSpecifier,
    unsigned specifierLen) {
  using namespace analyze_scanf;
  using namespace analyze_format_string;

  const ScanfConversionSpecifier &CS = FS.getConversionSpecifier();

  // Handle case where '%' and '*' don't consume an argument. These shouldn't
  // be used to decide if we are using positional arguments consistently.
  if (FS.consumesDataArgument()) {
    if (atFirstArg) {
      atFirstArg = false;
      usesPositionalArgs = FS.usesPositionalArg();
    }
    else if (usesPositionalArgs != FS.usesPositionalArg()) {
      // Mixing positional ("%1$d") and non-positional specifiers is invalid.
      HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()),
                                        startSpecifier, specifierLen);
      return false;
    }
  }

  // Check that the field width is non-zero: a zero width (e.g. "%0d") can
  // never match anything, so suggest removing it.
  const OptionalAmount &Amt = FS.getFieldWidth();
  if (Amt.getHowSpecified() == OptionalAmount::Constant) {
    if (Amt.getConstantAmount() == 0) {
      const CharSourceRange &R = getSpecifierRange(Amt.getStart(),
                                                   Amt.getConstantLength());
      EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_nonzero_width),
                           getLocationOfByte(Amt.getStart()),
                           /*IsStringLocation*/true, R,
                           FixItHint::CreateRemoval(R));
    }
  }

  if (!FS.consumesDataArgument()) {
    // FIXME: Technically specifying a precision or field width here
    // makes no sense. Worth issuing a warning at some point.
    return true;
  }

  // Consume the argument.
  unsigned argIndex = FS.getArgIndex();
  if (argIndex < NumDataArgs) {
    // The check to see if the argIndex is valid will come later.
    // We set the bit here because we may exit early from this
    // function if we encounter some other error.
    CoveredArgs.set(argIndex);
  }

  // Check the length modifier is valid with the given conversion specifier.
  if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(),
                                 S.getLangOpts()))
    HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
                                diag::warn_format_nonsensical_length);
  else if (!FS.hasStandardLengthModifier())
    HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen);
  else if (!FS.hasStandardLengthConversionCombination())
    HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
                                diag::warn_format_non_standard_conversion_spec);

  if (!FS.hasStandardConversionSpecifier(S.getLangOpts()))
    HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen);

  // The remaining checks depend on the data arguments.
  if (HasVAListArg)
    return true;

  if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex))
    return false;

  // Check that the argument type matches the format specifier.
  const Expr *Ex = getDataArg(argIndex);
  if (!Ex)
    return true;

  const analyze_format_string::ArgType &AT = FS.getArgType(S.Context);

  if (!AT.isValid()) {
    return true;
  }

  analyze_format_string::ArgType::MatchKind Match =
      AT.matchesType(S.Context, Ex->getType());
  bool Pedantic = Match == analyze_format_string::ArgType::NoMatchPedantic;
  if (Match == analyze_format_string::ArgType::Match)
    return true;

  // The types don't match; try to rewrite the specifier to fit the argument.
  ScanfSpecifier fixedFS = FS;
  bool Success = fixedFS.fixType(Ex->getType(), Ex->IgnoreImpCasts()->getType(),
                                 S.getLangOpts(), S.Context);

  unsigned Diag =
      Pedantic ? diag::warn_format_conversion_argument_type_mismatch_pedantic
               : diag::warn_format_conversion_argument_type_mismatch;

  if (Success) {
    // Get the fix string from the fixed format specifier.
    SmallString<128> buf;
    llvm::raw_svector_ostream os(buf);
    fixedFS.toString(os);

    EmitFormatDiagnostic(
        S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context)
                      << Ex->getType() << false << Ex->getSourceRange(),
        Ex->getBeginLoc(),
        /*IsStringLocation*/ false,
        getSpecifierRange(startSpecifier, specifierLen),
        FixItHint::CreateReplacement(
            getSpecifierRange(startSpecifier, specifierLen), os.str()));
  } else {
    // No automatic fix is available; just report the mismatch.
    EmitFormatDiagnostic(S.PDiag(Diag)
                             << AT.getRepresentativeTypeName(S.Context)
                             << Ex->getType() << false << Ex->getSourceRange(),
                         Ex->getBeginLoc(),
                         /*IsStringLocation*/ false,
                         getSpecifierRange(startSpecifier, specifierLen));
  }

  return true;
}
8601
// Top-level driver for checking a single format-string literal: validates
// the literal itself (wide/encoding, null-termination, emptiness) and then
// dispatches to the printf-family or scanf handler to parse and check each
// conversion specifier.
static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr,
                              const Expr *OrigFormatExpr,
                              ArrayRef<const Expr *> Args,
                              bool HasVAListArg, unsigned format_idx,
                              unsigned firstDataArg,
                              Sema::FormatStringType Type,
                              bool inFunctionCall,
                              Sema::VariadicCallType CallType,
                              llvm::SmallBitVector &CheckedVarArgs,
                              UncoveredArgHandler &UncoveredArg) {
  // CHECK: is the format string a wide literal?
  if (!FExpr->isAscii() && !FExpr->isUTF8()) {
    CheckFormatHandler::EmitFormatDiagnostic(
        S, inFunctionCall, Args[format_idx],
        S.PDiag(diag::warn_format_string_is_wide_literal), FExpr->getBeginLoc(),
        /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange());
    return;
  }

  // Str - The format string. NOTE: this is NOT null-terminated!
  StringRef StrRef = FExpr->getString();
  const char *Str = StrRef.data();
  // Account for cases where the string literal is truncated in a declaration,
  // e.g. char fmt[3] = "%d\n"; only TypeSize - 1 bytes are scanned.
  const ConstantArrayType *T =
      S.Context.getAsConstantArrayType(FExpr->getType());
  assert(T && "String literal not of constant array type!");
  size_t TypeSize = T->getSize().getZExtValue();
  size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
  const unsigned numDataArgs = Args.size() - firstDataArg;

  // Emit a warning if the string literal is truncated and does not contain an
  // embedded null character.
  if (TypeSize <= StrRef.size() &&
      StrRef.substr(0, TypeSize).find('\0') == StringRef::npos) {
    CheckFormatHandler::EmitFormatDiagnostic(
        S, inFunctionCall, Args[format_idx],
        S.PDiag(diag::warn_printf_format_string_not_null_terminated),
        FExpr->getBeginLoc(),
        /*IsStringLocation=*/true, OrigFormatExpr->getSourceRange());
    return;
  }

  // CHECK: empty format string?
  if (StrLen == 0 && numDataArgs > 0) {
    CheckFormatHandler::EmitFormatDiagnostic(
        S, inFunctionCall, Args[format_idx],
        S.PDiag(diag::warn_empty_format_string), FExpr->getBeginLoc(),
        /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange());
    return;
  }

  // Dispatch on the format family. NSString/OSTrace strings may contain
  // ObjC-specific conversions, hence the extra constructor flag.
  if (Type == Sema::FST_Printf || Type == Sema::FST_NSString ||
      Type == Sema::FST_FreeBSDKPrintf || Type == Sema::FST_OSLog ||
      Type == Sema::FST_OSTrace) {
    CheckPrintfHandler H(
        S, FExpr, OrigFormatExpr, Type, firstDataArg, numDataArgs,
        (Type == Sema::FST_NSString || Type == Sema::FST_OSTrace), Str,
        HasVAListArg, Args, format_idx, inFunctionCall, CallType,
        CheckedVarArgs, UncoveredArg);

    if (!analyze_format_string::ParsePrintfString(H, Str, Str + StrLen,
                                                  S.getLangOpts(),
                                                  S.Context.getTargetInfo(),
                                                  Type == Sema::FST_FreeBSDKPrintf))
      H.DoneProcessing();
  } else if (Type == Sema::FST_Scanf) {
    CheckScanfHandler H(S, FExpr, OrigFormatExpr, Type, firstDataArg,
                        numDataArgs, Str, HasVAListArg, Args, format_idx,
                        inFunctionCall, CallType, CheckedVarArgs, UncoveredArg);

    if (!analyze_format_string::ParseScanfString(H, Str, Str + StrLen,
                                                 S.getLangOpts(),
                                                 S.Context.getTargetInfo()))
      H.DoneProcessing();
  } // TODO: handle other formats
}
8678
8679bool Sema::FormatStringHasSArg(const StringLiteral *FExpr) {
8680 // Str - The format string. NOTE: this is NOT null-terminated!
8681 StringRef StrRef = FExpr->getString();
8682 const char *Str = StrRef.data();
8683 // Account for cases where the string literal is truncated in a declaration.
8684 const ConstantArrayType *T = Context.getAsConstantArrayType(FExpr->getType());
8685 assert(T && "String literal not of constant array type!");
8686 size_t TypeSize = T->getSize().getZExtValue();
8687 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
8688 return analyze_format_string::ParseFormatStringHasSArg(Str, Str + StrLen,
8689 getLangOpts(),
8690 Context.getTargetInfo());
8691}
8692
8693//===--- CHECK: Warn on use of wrong absolute value function. -------------===//
8694
// Returns the related absolute value function that is larger, or 0 if one
// does not exist. The chains are abs -> labs -> llabs, fabsf -> fabs ->
// fabsl, and cabsf -> cabs -> cabsl, for both the __builtin_* and library
// spellings.
static unsigned getLargerAbsoluteValueFunction(unsigned AbsFunction) {
  switch (AbsFunction) {
  default:
    return 0;

  // Builtin integer absolute value functions.
  case Builtin::BI__builtin_abs:
    return Builtin::BI__builtin_labs;
  case Builtin::BI__builtin_labs:
    return Builtin::BI__builtin_llabs;
  case Builtin::BI__builtin_llabs:
    return 0;

  // Builtin floating-point absolute value functions.
  case Builtin::BI__builtin_fabsf:
    return Builtin::BI__builtin_fabs;
  case Builtin::BI__builtin_fabs:
    return Builtin::BI__builtin_fabsl;
  case Builtin::BI__builtin_fabsl:
    return 0;

  // Builtin complex absolute value functions.
  case Builtin::BI__builtin_cabsf:
    return Builtin::BI__builtin_cabs;
  case Builtin::BI__builtin_cabs:
    return Builtin::BI__builtin_cabsl;
  case Builtin::BI__builtin_cabsl:
    return 0;

  // Library integer absolute value functions.
  case Builtin::BIabs:
    return Builtin::BIlabs;
  case Builtin::BIlabs:
    return Builtin::BIllabs;
  case Builtin::BIllabs:
    return 0;

  // Library floating-point absolute value functions.
  case Builtin::BIfabsf:
    return Builtin::BIfabs;
  case Builtin::BIfabs:
    return Builtin::BIfabsl;
  case Builtin::BIfabsl:
    return 0;

  // Library complex absolute value functions.
  case Builtin::BIcabsf:
    return Builtin::BIcabs;
  case Builtin::BIcabs:
    return Builtin::BIcabsl;
  case Builtin::BIcabsl:
    return 0;
  }
}
8745
8746// Returns the argument type of the absolute value function.
8747static QualType getAbsoluteValueArgumentType(ASTContext &Context,
8748 unsigned AbsType) {
8749 if (AbsType == 0)
8750 return QualType();
8751
8752 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None;
8753 QualType BuiltinType = Context.GetBuiltinType(AbsType, Error);
8754 if (Error != ASTContext::GE_None)
8755 return QualType();
8756
8757 const FunctionProtoType *FT = BuiltinType->getAs<FunctionProtoType>();
8758 if (!FT)
8759 return QualType();
8760
8761 if (FT->getNumParams() != 1)
8762 return QualType();
8763
8764 return FT->getParamType(0);
8765}
8766
8767// Returns the best absolute value function, or zero, based on type and
8768// current absolute value function.
8769static unsigned getBestAbsFunction(ASTContext &Context, QualType ArgType,
8770 unsigned AbsFunctionKind) {
8771 unsigned BestKind = 0;
8772 uint64_t ArgSize = Context.getTypeSize(ArgType);
8773 for (unsigned Kind = AbsFunctionKind; Kind != 0;
8774 Kind = getLargerAbsoluteValueFunction(Kind)) {
8775 QualType ParamType = getAbsoluteValueArgumentType(Context, Kind);
8776 if (Context.getTypeSize(ParamType) >= ArgSize) {
8777 if (BestKind == 0)
8778 BestKind = Kind;
8779 else if (Context.hasSameType(ParamType, ArgType)) {
8780 BestKind = Kind;
8781 break;
8782 }
8783 }
8784 }
8785 return BestKind;
8786}
8787
// The kind of argument an absolute value function operates on; used to pick
// between the abs/fabs/cabs families.
enum AbsoluteValueKind {
  AVK_Integer,
  AVK_Floating,
  AVK_Complex
};
8793
// Classifies the given type into the family of absolute value function
// (integer, floating point, or complex) that should handle it.
static AbsoluteValueKind getAbsoluteValueKind(QualType T) {
  if (T->isIntegralOrEnumerationType())
    return AVK_Integer;
  if (T->isRealFloatingType())
    return AVK_Floating;
  if (T->isAnyComplexType())
    return AVK_Complex;

  llvm_unreachable("Type not integer, floating, or complex");
}
8804
// Changes the absolute value function to a different type. Preserves whether
// the function is a builtin (__builtin_* maps to __builtin_*, library forms
// map to library forms). Always returns the smallest variant of the target
// family; callers widen it afterwards with getBestAbsFunction.
static unsigned changeAbsFunction(unsigned AbsKind,
                                  AbsoluteValueKind ValueKind) {
  switch (ValueKind) {
  case AVK_Integer:
    // Floating/complex variants map to the integer family.
    switch (AbsKind) {
    default:
      return 0;
    case Builtin::BI__builtin_fabsf:
    case Builtin::BI__builtin_fabs:
    case Builtin::BI__builtin_fabsl:
    case Builtin::BI__builtin_cabsf:
    case Builtin::BI__builtin_cabs:
    case Builtin::BI__builtin_cabsl:
      return Builtin::BI__builtin_abs;
    case Builtin::BIfabsf:
    case Builtin::BIfabs:
    case Builtin::BIfabsl:
    case Builtin::BIcabsf:
    case Builtin::BIcabs:
    case Builtin::BIcabsl:
      return Builtin::BIabs;
    }
  case AVK_Floating:
    // Integer/complex variants map to the floating-point family.
    switch (AbsKind) {
    default:
      return 0;
    case Builtin::BI__builtin_abs:
    case Builtin::BI__builtin_labs:
    case Builtin::BI__builtin_llabs:
    case Builtin::BI__builtin_cabsf:
    case Builtin::BI__builtin_cabs:
    case Builtin::BI__builtin_cabsl:
      return Builtin::BI__builtin_fabsf;
    case Builtin::BIabs:
    case Builtin::BIlabs:
    case Builtin::BIllabs:
    case Builtin::BIcabsf:
    case Builtin::BIcabs:
    case Builtin::BIcabsl:
      return Builtin::BIfabsf;
    }
  case AVK_Complex:
    // Integer/floating variants map to the complex family.
    switch (AbsKind) {
    default:
      return 0;
    case Builtin::BI__builtin_abs:
    case Builtin::BI__builtin_labs:
    case Builtin::BI__builtin_llabs:
    case Builtin::BI__builtin_fabsf:
    case Builtin::BI__builtin_fabs:
    case Builtin::BI__builtin_fabsl:
      return Builtin::BI__builtin_cabsf;
    case Builtin::BIabs:
    case Builtin::BIlabs:
    case Builtin::BIllabs:
    case Builtin::BIfabsf:
    case Builtin::BIfabs:
    case Builtin::BIfabsl:
      return Builtin::BIcabsf;
    }
  }
  llvm_unreachable("Unable to convert function");
}
8870
// Returns the builtin ID if FDecl is a recognized absolute value function
// (builtin or library form), or 0 otherwise.
static unsigned getAbsoluteValueFunctionKind(const FunctionDecl *FDecl) {
  // Unnamed functions cannot be abs variants.
  const IdentifierInfo *FnInfo = FDecl->getIdentifier();
  if (!FnInfo)
    return 0;

  switch (FDecl->getBuiltinID()) {
  default:
    return 0;
  case Builtin::BI__builtin_abs:
  case Builtin::BI__builtin_fabs:
  case Builtin::BI__builtin_fabsf:
  case Builtin::BI__builtin_fabsl:
  case Builtin::BI__builtin_labs:
  case Builtin::BI__builtin_llabs:
  case Builtin::BI__builtin_cabs:
  case Builtin::BI__builtin_cabsf:
  case Builtin::BI__builtin_cabsl:
  case Builtin::BIabs:
  case Builtin::BIlabs:
  case Builtin::BIllabs:
  case Builtin::BIfabs:
  case Builtin::BIfabsf:
  case Builtin::BIfabsl:
  case Builtin::BIcabs:
  case Builtin::BIcabsf:
  case Builtin::BIcabsl:
    return FDecl->getBuiltinID();
  }
  llvm_unreachable("Unknown Builtin type");
}
8901
// If the replacement is valid, emit a note with the replacement function.
// Additionally, suggest including the proper header if not already included.
// Bails out silently if the suggested name would resolve to something other
// than the intended function.
static void emitReplacement(Sema &S, SourceLocation Loc, SourceRange Range,
                            unsigned AbsKind, QualType ArgType) {
  bool EmitHeaderHint = true;
  const char *HeaderName = nullptr;
  const char *FunctionName = nullptr;
  if (S.getLangOpts().CPlusPlus && !ArgType->isAnyComplexType()) {
    // In C++, suggest the overloaded std::abs rather than a specific
    // C library variant.
    FunctionName = "std::abs";
    if (ArgType->isIntegralOrEnumerationType()) {
      HeaderName = "cstdlib";
    } else if (ArgType->isRealFloatingType()) {
      HeaderName = "cmath";
    } else {
      llvm_unreachable("Invalid Type");
    }

    // Lookup all std::abs
    if (NamespaceDecl *Std = S.getStdNamespace()) {
      LookupResult R(S, &S.Context.Idents.get("abs"), Loc, Sema::LookupAnyName);
      R.suppressDiagnostics();
      S.LookupQualifiedName(R, Std);

      for (const auto *I : R) {
        const FunctionDecl *FDecl = nullptr;
        if (const UsingShadowDecl *UsingD = dyn_cast<UsingShadowDecl>(I)) {
          FDecl = dyn_cast<FunctionDecl>(UsingD->getTargetDecl());
        } else {
          FDecl = dyn_cast<FunctionDecl>(I);
        }
        if (!FDecl)
          continue;

        // Found std::abs(), check that they are the right ones.
        if (FDecl->getNumParams() != 1)
          continue;

        // Check that the parameter type can handle the argument.
        QualType ParamType = FDecl->getParamDecl(0)->getType();
        if (getAbsoluteValueKind(ArgType) == getAbsoluteValueKind(ParamType) &&
            S.Context.getTypeSize(ArgType) <=
                S.Context.getTypeSize(ParamType)) {
          // Found a function, don't need the header hint.
          EmitHeaderHint = false;
          break;
        }
      }
    }
  } else {
    // In C (or for complex types), suggest the specific builtin and its
    // declaring header.
    FunctionName = S.Context.BuiltinInfo.getName(AbsKind);
    HeaderName = S.Context.BuiltinInfo.getHeaderName(AbsKind);

    if (HeaderName) {
      DeclarationName DN(&S.Context.Idents.get(FunctionName));
      LookupResult R(S, DN, Loc, Sema::LookupAnyName);
      R.suppressDiagnostics();
      S.LookupName(R, S.getCurScope());

      if (R.isSingleResult()) {
        // The name is declared; only skip the header hint if it really is the
        // builtin we want to suggest, otherwise suggesting it would be wrong.
        FunctionDecl *FD = dyn_cast<FunctionDecl>(R.getFoundDecl());
        if (FD && FD->getBuiltinID() == AbsKind) {
          EmitHeaderHint = false;
        } else {
          return;
        }
      } else if (!R.empty()) {
        // Ambiguous lookup: don't suggest anything.
        return;
      }
    }
  }

  S.Diag(Loc, diag::note_replace_abs_function)
      << FunctionName << FixItHint::CreateReplacement(Range, FunctionName);

  if (!HeaderName)
    return;

  if (!EmitHeaderHint)
    return;

  S.Diag(Loc, diag::note_include_header_or_declare) << HeaderName
                                                    << FunctionName;
}
8985
8986template <std::size_t StrLen>
8987static bool IsStdFunction(const FunctionDecl *FDecl,
8988 const char (&Str)[StrLen]) {
8989 if (!FDecl)
8990 return false;
8991 if (!FDecl->getIdentifier() || !FDecl->getIdentifier()->isStr(Str))
8992 return false;
8993 if (!FDecl->isInStdNamespace())
8994 return false;
8995
8996 return true;
8997}
8998
// Warn when using the wrong abs() function: the argument is unsigned, is a
// pointer, or is wider / of a different kind (integer vs. floating vs.
// complex) than the called function's parameter. Suggests the proper
// replacement where one exists.
void Sema::CheckAbsoluteValueFunction(const CallExpr *Call,
                                      const FunctionDecl *FDecl) {
  if (Call->getNumArgs() != 1)
    return;

  unsigned AbsKind = getAbsoluteValueFunctionKind(FDecl);
  bool IsStdAbs = IsStdFunction(FDecl, "abs");
  if (AbsKind == 0 && !IsStdAbs)
    return;

  // ArgType: type of the expression before conversion to the parameter type.
  // ParamType: type the argument was converted to for the call.
  QualType ArgType = Call->getArg(0)->IgnoreParenImpCasts()->getType();
  QualType ParamType = Call->getArg(0)->getType();

  // Unsigned types cannot be negative. Suggest removing the absolute value
  // function call.
  if (ArgType->isUnsignedIntegerType()) {
    const char *FunctionName =
        IsStdAbs ? "std::abs" : Context.BuiltinInfo.getName(AbsKind);
    Diag(Call->getExprLoc(), diag::warn_unsigned_abs) << ArgType << ParamType;
    Diag(Call->getExprLoc(), diag::note_remove_abs)
        << FunctionName
        << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange());
    return;
  }

  // Taking the absolute value of a pointer is very suspicious, they probably
  // wanted to index into an array, dereference a pointer, call a function, etc.
  if (ArgType->isPointerType() || ArgType->canDecayToPointerType()) {
    unsigned DiagType = 0;
    if (ArgType->isFunctionType())
      DiagType = 1;
    else if (ArgType->isArrayType())
      DiagType = 2;

    Diag(Call->getExprLoc(), diag::warn_pointer_abs) << DiagType << ArgType;
    return;
  }

  // std::abs has overloads which prevent most of the absolute value problems
  // from occurring.
  if (IsStdAbs)
    return;

  AbsoluteValueKind ArgValueKind = getAbsoluteValueKind(ArgType);
  AbsoluteValueKind ParamValueKind = getAbsoluteValueKind(ParamType);

  // The argument and parameter are the same kind. Check if they are the right
  // size.
  if (ArgValueKind == ParamValueKind) {
    if (Context.getTypeSize(ArgType) <= Context.getTypeSize(ParamType))
      return;

    // The argument is wider than the parameter: warn about truncation and
    // suggest the larger variant in the same family.
    unsigned NewAbsKind = getBestAbsFunction(Context, ArgType, AbsKind);
    Diag(Call->getExprLoc(), diag::warn_abs_too_small)
        << FDecl << ArgType << ParamType;

    if (NewAbsKind == 0)
      return;

    emitReplacement(*this, Call->getExprLoc(),
                    Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
    return;
  }

  // ArgValueKind != ParamValueKind
  // The wrong type of absolute value function was used. Attempt to find the
  // proper one.
  unsigned NewAbsKind = changeAbsFunction(AbsKind, ArgValueKind);
  NewAbsKind = getBestAbsFunction(Context, ArgType, NewAbsKind);
  if (NewAbsKind == 0)
    return;

  Diag(Call->getExprLoc(), diag::warn_wrong_absolute_value_type)
      << FDecl << ParamValueKind << ArgValueKind;

  emitReplacement(*this, Call->getExprLoc(),
                  Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
}
9078
//===--- CHECK: Warn on use of std::max and unsigned zero. ----------------===//
// Warn on std::max<unsigned-type>(0, x) (or the mirrored form): comparing an
// unsigned value against literal zero is a no-op, so the call can be removed.
void Sema::CheckMaxUnsignedZero(const CallExpr *Call,
                                const FunctionDecl *FDecl) {
  if (!Call || !FDecl) return;

  // Ignore template specializations and macros.
  if (inTemplateInstantiation()) return;
  if (Call->getExprLoc().isMacroID()) return;

  // Only care about the one template argument, two function parameter std::max
  if (Call->getNumArgs() != 2) return;
  if (!IsStdFunction(FDecl, "max")) return;
  const auto * ArgList = FDecl->getTemplateSpecializationArgs();
  if (!ArgList) return;
  if (ArgList->size() != 1) return;

  // Check that template type argument is unsigned integer.
  const auto& TA = ArgList->get(0);
  if (TA.getKind() != TemplateArgument::Type) return;
  QualType ArgType = TA.getAsType();
  if (!ArgType->isUnsignedIntegerType()) return;

  // See if either argument is a literal zero. std::max takes its arguments
  // by const reference, so a literal appears as a materialized temporary.
  auto IsLiteralZeroArg = [](const Expr* E) -> bool {
    const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E);
    if (!MTE) return false;
    const auto *Num = dyn_cast<IntegerLiteral>(MTE->GetTemporaryExpr());
    if (!Num) return false;
    if (Num->getValue() != 0) return false;
    return true;
  };

  const Expr *FirstArg = Call->getArg(0);
  const Expr *SecondArg = Call->getArg(1);
  const bool IsFirstArgZero = IsLiteralZeroArg(FirstArg);
  const bool IsSecondArgZero = IsLiteralZeroArg(SecondArg);

  // Only warn when exactly one argument is zero.
  if (IsFirstArgZero == IsSecondArgZero) return;

  SourceRange FirstRange = FirstArg->getSourceRange();
  SourceRange SecondRange = SecondArg->getSourceRange();

  SourceRange ZeroRange = IsFirstArgZero ? FirstRange : SecondRange;

  Diag(Call->getExprLoc(), diag::warn_max_unsigned_zero)
      << IsFirstArgZero << Call->getCallee()->getSourceRange() << ZeroRange;

  // Deduce what parts to remove so that "std::max(0u, foo)" becomes "(foo)".
  SourceRange RemovalRange;
  if (IsFirstArgZero) {
    // Remove from the zero up to (but not including) the other argument.
    RemovalRange = SourceRange(FirstRange.getBegin(),
                               SecondRange.getBegin().getLocWithOffset(-1));
  } else {
    // Remove from just past the other argument through the zero.
    RemovalRange = SourceRange(getLocForEndOfToken(FirstRange.getEnd()),
                               SecondRange.getEnd());
  }

  Diag(Call->getExprLoc(), diag::note_remove_max_call)
      << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange())
      << FixItHint::CreateRemoval(RemovalRange);
}
9141
9142//===--- CHECK: Standard memory functions ---------------------------------===//
9143
/// Takes the expression passed to the size_t parameter of functions
/// such as memcmp, strncat, etc and warns if it's a comparison.
///
/// This is to catch typos like `if (memcmp(&a, &b, sizeof(a) > 0))`.
/// \returns true if a diagnostic was emitted.
static bool CheckMemorySizeofForComparison(Sema &S, const Expr *E,
                                           IdentifierInfo *FnName,
                                           SourceLocation FnLoc,
                                           SourceLocation RParenLoc) {
  const BinaryOperator *Size = dyn_cast<BinaryOperator>(E);
  if (!Size)
    return false;

  // Only warn if the size operand is a comparison or logical operator
  // (e.g. >, <, >=, <=, ==, &&, ||) -- any other binop is a plausible size.
  if (!Size->isComparisonOp() && !Size->isLogicalOp())
    return false;

  SourceRange SizeRange = Size->getSourceRange();
  S.Diag(Size->getOperatorLoc(), diag::warn_memsize_comparison)
      << SizeRange << FnName;
  // First fixit: move the closing paren so the comparison applies to the call
  // result instead of the size argument.
  S.Diag(FnLoc, diag::note_memsize_comparison_paren)
      << FnName
      << FixItHint::CreateInsertion(
             S.getLocForEndOfToken(Size->getLHS()->getEndLoc()), ")")
      << FixItHint::CreateRemoval(RParenLoc);
  // Second fixit: a (size_t) cast silences the warning if the comparison
  // really was intended as the size.
  S.Diag(SizeRange.getBegin(), diag::note_memsize_comparison_cast_silence)
      << FixItHint::CreateInsertion(SizeRange.getBegin(), "(size_t)(")
      << FixItHint::CreateInsertion(S.getLocForEndOfToken(SizeRange.getEnd()),
                                    ")");

  return true;
}
9175
9176/// Determine whether the given type is or contains a dynamic class type
9177/// (e.g., whether it has a vtable).
9178static const CXXRecordDecl *getContainedDynamicClass(QualType T,
9179 bool &IsContained) {
9180 // Look through array types while ignoring qualifiers.
9181 const Type *Ty = T->getBaseElementTypeUnsafe();
9182 IsContained = false;
9183
9184 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
9185 RD = RD ? RD->getDefinition() : nullptr;
9186 if (!RD || RD->isInvalidDecl())
9187 return nullptr;
9188
9189 if (RD->isDynamicClass())
9190 return RD;
9191
9192 // Check all the fields. If any bases were dynamic, the class is dynamic.
9193 // It's impossible for a class to transitively contain itself by value, so
9194 // infinite recursion is impossible.
9195 for (auto *FD : RD->fields()) {
9196 bool SubContained;
9197 if (const CXXRecordDecl *ContainedRD =
9198 getContainedDynamicClass(FD->getType(), SubContained)) {
9199 IsContained = true;
9200 return ContainedRD;
9201 }
9202 }
9203
9204 return nullptr;
9205}
9206
9207static const UnaryExprOrTypeTraitExpr *getAsSizeOfExpr(const Expr *E) {
9208 if (const auto *Unary = dyn_cast<UnaryExprOrTypeTraitExpr>(E))
9209 if (Unary->getKind() == UETT_SizeOf)
9210 return Unary;
9211 return nullptr;
9212}
9213
9214/// If E is a sizeof expression, returns its argument expression,
9215/// otherwise returns NULL.
9216static const Expr *getSizeOfExprArg(const Expr *E) {
9217 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E))
9218 if (!SizeOf->isArgumentType())
9219 return SizeOf->getArgumentExpr()->IgnoreParenImpCasts();
9220 return nullptr;
9221}
9222
9223/// If E is a sizeof expression, returns its argument type.
9224static QualType getSizeOfArgType(const Expr *E) {
9225 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E))
9226 return SizeOf->getTypeOfArgument();
9227 return QualType();
9228}
9229
namespace {

/// Visitor that emits a note_nontrivial_field diagnostic for each field
/// that makes a struct non-trivial to default-initialize (ARC __strong or
/// __weak fields, found recursively through nested structs and arrays).
/// Used to explain the warn_cstruct_memaccess warning for memset/bzero.
struct SearchNonTrivialToInitializeField
    : DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField> {
  using Super =
      DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField>;

  SearchNonTrivialToInitializeField(const Expr *E, Sema &S) : E(E), S(S) {}

  // Route array types to visitArray; defer everything else to the base
  // visitor's kind-based dispatch.
  void visitWithKind(QualType::PrimitiveDefaultInitializeKind PDIK, QualType FT,
                     SourceLocation SL) {
    if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) {
      asDerived().visitArray(PDIK, AT, SL);
      return;
    }

    Super::visitWithKind(PDIK, FT, SL);
  }

  // ARC-managed fields are what we report. Diagnostic operand is 1 here;
  // the copy visitor below uses 0.
  void visitARCStrong(QualType FT, SourceLocation SL) {
    S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1);
  }
  void visitARCWeak(QualType FT, SourceLocation SL) {
    S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1);
  }
  // Recurse into the fields of a nested struct.
  void visitStruct(QualType FT, SourceLocation SL) {
    for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields())
      visit(FD->getType(), FD->getLocation());
  }
  // Arrays are examined via their ultimate element type.
  void visitArray(QualType::PrimitiveDefaultInitializeKind PDIK,
                  const ArrayType *AT, SourceLocation SL) {
    visit(getContext().getBaseElementType(AT), SL);
  }
  void visitTrivial(QualType FT, SourceLocation SL) {}

  /// Entry point: walk the fields of record type \p RT and emit one note
  /// per problematic field.
  static void diag(QualType RT, const Expr *E, Sema &S) {
    SearchNonTrivialToInitializeField(E, S).visitStruct(RT, SourceLocation());
  }

  ASTContext &getContext() { return S.getASTContext(); }

  const Expr *E; // Expression the runtime-behavior notes are attached to.
  Sema &S;
};

/// Companion to SearchNonTrivialToInitializeField for the copy case:
/// notes each ARC __strong/__weak field that makes a struct non-trivial
/// to copy. Explains warn_cstruct_memaccess for memcpy/memmove.
struct SearchNonTrivialToCopyField
    : CopiedTypeVisitor<SearchNonTrivialToCopyField, false> {
  using Super = CopiedTypeVisitor<SearchNonTrivialToCopyField, false>;

  SearchNonTrivialToCopyField(const Expr *E, Sema &S) : E(E), S(S) {}

  // Route array types to visitArray; defer everything else to the base
  // visitor's kind-based dispatch.
  void visitWithKind(QualType::PrimitiveCopyKind PCK, QualType FT,
                     SourceLocation SL) {
    if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) {
      asDerived().visitArray(PCK, AT, SL);
      return;
    }

    Super::visitWithKind(PCK, FT, SL);
  }

  // ARC-managed fields are what we report. Diagnostic operand is 0 here;
  // the default-initialize visitor above uses 1.
  void visitARCStrong(QualType FT, SourceLocation SL) {
    S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0);
  }
  void visitARCWeak(QualType FT, SourceLocation SL) {
    S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0);
  }
  // Recurse into the fields of a nested struct.
  void visitStruct(QualType FT, SourceLocation SL) {
    for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields())
      visit(FD->getType(), FD->getLocation());
  }
  // Arrays are examined via their ultimate element type.
  void visitArray(QualType::PrimitiveCopyKind PCK, const ArrayType *AT,
                  SourceLocation SL) {
    visit(getContext().getBaseElementType(AT), SL);
  }
  void preVisit(QualType::PrimitiveCopyKind PCK, QualType FT,
                SourceLocation SL) {}
  void visitTrivial(QualType FT, SourceLocation SL) {}
  void visitVolatileTrivial(QualType FT, SourceLocation SL) {}

  /// Entry point: walk the fields of record type \p RT and emit one note
  /// per problematic field.
  static void diag(QualType RT, const Expr *E, Sema &S) {
    SearchNonTrivialToCopyField(E, S).visitStruct(RT, SourceLocation());
  }

  ASTContext &getContext() { return S.getASTContext(); }

  const Expr *E; // Expression the runtime-behavior notes are attached to.
  Sema &S;
};

} // namespace
9321
9322/// Detect if \c SizeofExpr is likely to calculate the sizeof an object.
9323static bool doesExprLikelyComputeSize(const Expr *SizeofExpr) {
9324 SizeofExpr = SizeofExpr->IgnoreParenImpCasts();
9325
9326 if (const auto *BO = dyn_cast<BinaryOperator>(SizeofExpr)) {
9327 if (BO->getOpcode() != BO_Mul && BO->getOpcode() != BO_Add)
9328 return false;
9329
9330 return doesExprLikelyComputeSize(BO->getLHS()) ||
9331 doesExprLikelyComputeSize(BO->getRHS());
9332 }
9333
9334 return getAsSizeOfExpr(SizeofExpr) != nullptr;
9335}
9336
9337/// Check if the ArgLoc originated from a macro passed to the call at CallLoc.
9338///
9339/// \code
9340/// #define MACRO 0
9341/// foo(MACRO);
9342/// foo(0);
9343/// \endcode
9344///
9345/// This should return true for the first call to foo, but not for the second
9346/// (regardless of whether foo is a macro or function).
9347static bool isArgumentExpandedFromMacro(SourceManager &SM,
9348 SourceLocation CallLoc,
9349 SourceLocation ArgLoc) {
9350 if (!CallLoc.isMacroID())
9351 return SM.getFileID(CallLoc) != SM.getFileID(ArgLoc);
9352
9353 return SM.getFileID(SM.getImmediateMacroCallerLoc(CallLoc)) !=
9354 SM.getFileID(SM.getImmediateMacroCallerLoc(ArgLoc));
9355}
9356
/// Diagnose cases like 'memset(buf, sizeof(buf), 0)', which should have the
/// last two arguments transposed.
static void CheckMemaccessSize(Sema &S, unsigned BId, const CallExpr *Call) {
  // Only memset and bzero are checked here.
  if (BId != Builtin::BImemset && BId != Builtin::BIbzero)
    return;

  // The size argument is the third for memset, the second for bzero.
  const Expr *SizeArg =
    Call->getArg(BId == Builtin::BImemset ? 2 : 1)->IgnoreImpCasts();

  auto isLiteralZero = [](const Expr *E) {
    return isa<IntegerLiteral>(E) && cast<IntegerLiteral>(E)->getValue() == 0;
  };

  // If we're memsetting or bzeroing 0 bytes, then this is likely an error.
  // A literal zero that came through a macro argument is exempt, since the
  // macro may legitimately expand to 0 in some configurations.
  SourceLocation CallLoc = Call->getRParenLoc();
  SourceManager &SM = S.getSourceManager();
  if (isLiteralZero(SizeArg) &&
      !isArgumentExpandedFromMacro(SM, CallLoc, SizeArg->getExprLoc())) {

    SourceLocation DiagLoc = SizeArg->getExprLoc();

    // Some platforms #define bzero to __builtin_memset. See if this is the
    // case, and if so, emit a better diagnostic.
    if (BId == Builtin::BIbzero ||
        (CallLoc.isMacroID() && Lexer::getImmediateMacroName(
                                    CallLoc, SM, S.getLangOpts()) == "bzero")) {
      S.Diag(DiagLoc, diag::warn_suspicious_bzero_size);
      S.Diag(DiagLoc, diag::note_suspicious_bzero_size_silence);
    } else if (!isLiteralZero(Call->getArg(1)->IgnoreImpCasts())) {
      // memset(p, 0, 0) is not flagged — presumably because with both
      // operands zero a transposition would change nothing.
      S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 0;
      S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 0;
    }
    return;
  }

  // If the second argument to a memset is a sizeof expression and the third
  // isn't, this is also likely an error. This should catch
  // 'memset(buf, sizeof(buf), 0xff)'.
  if (BId == Builtin::BImemset &&
      doesExprLikelyComputeSize(Call->getArg(1)) &&
      !doesExprLikelyComputeSize(Call->getArg(2))) {
    SourceLocation DiagLoc = Call->getArg(1)->getExprLoc();
    S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 1;
    S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 1;
    return;
  }
}
9404
/// Check for dangerous or invalid arguments to memset().
///
/// This issues warnings on known problematic, dangerous or unspecified
/// arguments to the standard 'memset', 'memcpy', 'memmove', and 'memcmp'
/// function calls.
///
/// \param Call The call expression to diagnose.
/// \param BId The builtin ID of the callee; must be non-zero.
/// \param FnName The callee's name, used in diagnostics.
void Sema::CheckMemaccessArguments(const CallExpr *Call,
                                   unsigned BId,
                                   IdentifierInfo *FnName) {
  assert(BId != 0);

  // It is possible to have a non-standard definition of memset.  Validate
  // we have enough arguments, and if not, abort further checking.
  unsigned ExpectedNumArgs =
      (BId == Builtin::BIstrndup || BId == Builtin::BIbzero ? 2 : 3);
  if (Call->getNumArgs() < ExpectedNumArgs)
    return;

  // Number of leading pointer arguments to inspect in the loop below:
  // just the destination for memset/bzero/strndup, dest and src otherwise.
  unsigned LastArg = (BId == Builtin::BImemset || BId == Builtin::BIbzero ||
                      BId == Builtin::BIstrndup ? 1 : 2);
  // Index of the length/size argument.
  unsigned LenArg =
      (BId == Builtin::BIbzero || BId == Builtin::BIstrndup ? 1 : 2);
  const Expr *LenExpr = Call->getArg(LenArg)->IgnoreParenImpCasts();

  // Catch 'memcmp(&a, &b, sizeof(a) > 0)' — a comparison in the size slot.
  if (CheckMemorySizeofForComparison(*this, LenExpr, FnName,
                                     Call->getBeginLoc(), Call->getRParenLoc()))
    return;

  // Catch cases like 'memset(buf, sizeof(buf), 0)'.
  CheckMemaccessSize(*this, BId, Call);

  // We have special checking when the length is a sizeof expression.
  QualType SizeOfArgTy = getSizeOfArgType(LenExpr);
  const Expr *SizeOfArg = getSizeOfExprArg(LenExpr);
  llvm::FoldingSetNodeID SizeOfArgID;

  // Although widely used, 'bzero' is not a standard function. Be more strict
  // with the argument types before allowing diagnostics and only allow the
  // form bzero(ptr, sizeof(...)).
  QualType FirstArgTy = Call->getArg(0)->IgnoreParenImpCasts()->getType();
  if (BId == Builtin::BIbzero && !FirstArgTy->getAs<PointerType>())
    return;

  for (unsigned ArgIdx = 0; ArgIdx != LastArg; ++ArgIdx) {
    const Expr *Dest = Call->getArg(ArgIdx)->IgnoreParenImpCasts();
    SourceRange ArgRange = Call->getArg(ArgIdx)->getSourceRange();

    QualType DestTy = Dest->getType();
    QualType PointeeTy;
    if (const PointerType *DestPtrTy = DestTy->getAs<PointerType>()) {
      PointeeTy = DestPtrTy->getPointeeType();

      // Never warn about void type pointers. This can be used to suppress
      // false positives.
      if (PointeeTy->isVoidType())
        continue;

      // Catch "memset(p, 0, sizeof(p))" -- needs to be sizeof(*p). Do this by
      // actually comparing the expressions for equality. Because computing the
      // expression IDs can be expensive, we only do this if the diagnostic is
      // enabled.
      if (SizeOfArg &&
          !Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess,
                           SizeOfArg->getExprLoc())) {
        // We only compute IDs for expressions if the warning is enabled, and
        // cache the sizeof arg's ID.
        if (SizeOfArgID == llvm::FoldingSetNodeID())
          SizeOfArg->Profile(SizeOfArgID, Context, true);
        llvm::FoldingSetNodeID DestID;
        Dest->Profile(DestID, Context, true);
        if (DestID == SizeOfArgID) {
          // TODO: For strncpy() and friends, this could suggest sizeof(dst)
          //       over sizeof(src) as well.
          unsigned ActionIdx = 0; // Default is to suggest dereferencing.
          StringRef ReadableName = FnName->getName();

          if (const UnaryOperator *UnaryOp = dyn_cast<UnaryOperator>(Dest))
            if (UnaryOp->getOpcode() == UO_AddrOf)
              ActionIdx = 1; // If its an address-of operator, just remove it.
          if (!PointeeTy->isIncompleteType() &&
              (Context.getTypeSize(PointeeTy) == Context.getCharWidth()))
            ActionIdx = 2; // If the pointee's size is sizeof(char),
                           // suggest an explicit length.

          // If the function is defined as a builtin macro, do not show macro
          // expansion.
          SourceLocation SL = SizeOfArg->getExprLoc();
          SourceRange DSR = Dest->getSourceRange();
          SourceRange SSR = SizeOfArg->getSourceRange();
          SourceManager &SM = getSourceManager();

          if (SM.isMacroArgExpansion(SL)) {
            ReadableName = Lexer::getImmediateMacroName(SL, SM, LangOpts);
            SL = SM.getSpellingLoc(SL);
            DSR = SourceRange(SM.getSpellingLoc(DSR.getBegin()),
                              SM.getSpellingLoc(DSR.getEnd()));
            SSR = SourceRange(SM.getSpellingLoc(SSR.getBegin()),
                              SM.getSpellingLoc(SSR.getEnd()));
          }

          DiagRuntimeBehavior(SL, SizeOfArg,
                              PDiag(diag::warn_sizeof_pointer_expr_memaccess)
                                << ReadableName
                                << PointeeTy
                                << DestTy
                                << DSR
                                << SSR);
          DiagRuntimeBehavior(SL, SizeOfArg,
                         PDiag(diag::warn_sizeof_pointer_expr_memaccess_note)
                                << ActionIdx
                                << SSR);

          break;
        }
      }

      // Also check for cases where the sizeof argument is the exact same
      // type as the memory argument, and where it points to a user-defined
      // record type.
      if (SizeOfArgTy != QualType()) {
        if (PointeeTy->isRecordType() &&
            Context.typesAreCompatible(SizeOfArgTy, DestTy)) {
          DiagRuntimeBehavior(LenExpr->getExprLoc(), Dest,
                              PDiag(diag::warn_sizeof_pointer_type_memaccess)
                                << FnName << SizeOfArgTy << ArgIdx
                                << PointeeTy << Dest->getSourceRange()
                                << LenExpr->getSourceRange());
          break;
        }
      }
    } else if (DestTy->isArrayType()) {
      // Arrays are checked directly as the "pointee" below.
      PointeeTy = DestTy;
    }

    if (PointeeTy == QualType())
      continue;

    // Always complain about dynamic classes.
    bool IsContained;
    if (const CXXRecordDecl *ContainedRD =
            getContainedDynamicClass(PointeeTy, IsContained)) {

      unsigned OperationType = 0;
      const bool IsCmp = BId == Builtin::BImemcmp || BId == Builtin::BIbcmp;
      // "overwritten" if we're warning about the destination for any call
      // but memcmp; otherwise a verb appropriate to the call.
      if (ArgIdx != 0 || IsCmp) {
        if (BId == Builtin::BImemcpy)
          OperationType = 1;
        else if(BId == Builtin::BImemmove)
          OperationType = 2;
        else if (IsCmp)
          OperationType = 3;
      }

      DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
                          PDiag(diag::warn_dyn_class_memaccess)
                            << (IsCmp ? ArgIdx + 2 : ArgIdx) << FnName
                            << IsContained << ContainedRD << OperationType
                            << Call->getCallee()->getSourceRange());
    } else if (PointeeTy.hasNonTrivialObjCLifetime() &&
             BId != Builtin::BImemset)
      DiagRuntimeBehavior(
        Dest->getExprLoc(), Dest,
        PDiag(diag::warn_arc_object_memaccess)
          << ArgIdx << FnName << PointeeTy
          << Call->getCallee()->getSourceRange());
    else if (const auto *RT = PointeeTy->getAs<RecordType>()) {
      // Warn on memset/bzero of structs that are non-trivial to
      // default-initialize, and memcpy/memmove of structs that are
      // non-trivial to copy, then note the offending fields.
      if ((BId == Builtin::BImemset || BId == Builtin::BIbzero) &&
          RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize()) {
        DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
                            PDiag(diag::warn_cstruct_memaccess)
                                << ArgIdx << FnName << PointeeTy << 0);
        SearchNonTrivialToInitializeField::diag(PointeeTy, Dest, *this);
      } else if ((BId == Builtin::BImemcpy || BId == Builtin::BImemmove) &&
                 RT->getDecl()->isNonTrivialToPrimitiveCopy()) {
        DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
                            PDiag(diag::warn_cstruct_memaccess)
                                << ArgIdx << FnName << PointeeTy << 1);
        SearchNonTrivialToCopyField::diag(PointeeTy, Dest, *this);
      } else {
        continue;
      }
    } else
      continue;

    // Only reached when one of the branches above diagnosed this argument:
    // suggest casting to void* to silence the warning, then stop checking.
    DiagRuntimeBehavior(
      Dest->getExprLoc(), Dest,
      PDiag(diag::note_bad_memaccess_silence)
        << FixItHint::CreateInsertion(ArgRange.getBegin(), "(void*)"));
    break;
  }
}
9599
9600// A little helper routine: ignore addition and subtraction of integer literals.
9601// This intentionally does not ignore all integer constant expressions because
9602// we don't want to remove sizeof().
9603static const Expr *ignoreLiteralAdditions(const Expr *Ex, ASTContext &Ctx) {
9604 Ex = Ex->IgnoreParenCasts();
9605
9606 while (true) {
9607 const BinaryOperator * BO = dyn_cast<BinaryOperator>(Ex);
9608 if (!BO || !BO->isAdditiveOp())
9609 break;
9610
9611 const Expr *RHS = BO->getRHS()->IgnoreParenCasts();
9612 const Expr *LHS = BO->getLHS()->IgnoreParenCasts();
9613
9614 if (isa<IntegerLiteral>(RHS))
9615 Ex = LHS;
9616 else if (isa<IntegerLiteral>(LHS))
9617 Ex = RHS;
9618 else
9619 break;
9620 }
9621
9622 return Ex;
9623}
9624
9625static bool isConstantSizeArrayWithMoreThanOneElement(QualType Ty,
9626 ASTContext &Context) {
9627 // Only handle constant-sized or VLAs, but not flexible members.
9628 if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(Ty)) {
9629 // Only issue the FIXIT for arrays of size > 1.
9630 if (CAT->getSize().getSExtValue() <= 1)
9631 return false;
9632 } else if (!Ty->isVariableArrayType()) {
9633 return false;
9634 }
9635 return true;
9636}
9637
// Warn if the user has made the 'size' argument to strlcpy or strlcat
// be the size of the source, instead of the destination.
void Sema::CheckStrlcpycatArguments(const CallExpr *Call,
                                    IdentifierInfo *FnName) {

  // Don't crash if the user has the wrong number of arguments
  unsigned NumArgs = Call->getNumArgs();
  if ((NumArgs != 3) && (NumArgs != 4))
    return;

  // Strip literal +/- adjustments so e.g. 'strlen(x) + 1' still matches.
  const Expr *SrcArg = ignoreLiteralAdditions(Call->getArg(1), Context);
  const Expr *SizeArg = ignoreLiteralAdditions(Call->getArg(2), Context);
  const Expr *CompareWithSrc = nullptr;

  // Catch a comparison typo in the size slot first.
  if (CheckMemorySizeofForComparison(*this, SizeArg, FnName,
                                     Call->getBeginLoc(), Call->getRParenLoc()))
    return;

  // Look for 'strlcpy(dst, x, sizeof(x))'
  if (const Expr *Ex = getSizeOfExprArg(SizeArg))
    CompareWithSrc = Ex;
  else {
    // Look for 'strlcpy(dst, x, strlen(x))'
    if (const CallExpr *SizeCall = dyn_cast<CallExpr>(SizeArg)) {
      if (SizeCall->getBuiltinCallee() == Builtin::BIstrlen &&
          SizeCall->getNumArgs() == 1)
        CompareWithSrc = ignoreLiteralAdditions(SizeCall->getArg(0), Context);
    }
  }

  if (!CompareWithSrc)
    return;

  // Determine if the argument to sizeof/strlen is equal to the source
  // argument.  In principle there's all kinds of things you could do
  // here, for instance creating an == expression and evaluating it with
  // EvaluateAsBooleanCondition, but this uses a more direct technique:
  // both must be DeclRefExprs naming the same declaration.
  const DeclRefExpr *SrcArgDRE = dyn_cast<DeclRefExpr>(SrcArg);
  if (!SrcArgDRE)
    return;

  const DeclRefExpr *CompareWithSrcDRE = dyn_cast<DeclRefExpr>(CompareWithSrc);
  if (!CompareWithSrcDRE ||
      SrcArgDRE->getDecl() != CompareWithSrcDRE->getDecl())
    return;

  // Warn, using the original (unstripped) size argument for the range.
  const Expr *OriginalSizeArg = Call->getArg(2);
  Diag(CompareWithSrcDRE->getBeginLoc(), diag::warn_strlcpycat_wrong_size)
      << OriginalSizeArg->getSourceRange() << FnName;

  // Output a FIXIT hint if the destination is an array (rather than a
  // pointer to an array).  This could be enhanced to handle some
  // pointers if we know the actual size, like if DstArg is 'array+2'
  // we could say 'sizeof(array)-2'.
  const Expr *DstArg = Call->getArg(0)->IgnoreParenImpCasts();
  if (!isConstantSizeArrayWithMoreThanOneElement(DstArg->getType(), Context))
    return;

  // Build the replacement text "sizeof(<dst>)".
  SmallString<128> sizeString;
  llvm::raw_svector_ostream OS(sizeString);
  OS << "sizeof(";
  DstArg->printPretty(OS, nullptr, getPrintingPolicy());
  OS << ")";

  Diag(OriginalSizeArg->getBeginLoc(), diag::note_strlcpycat_wrong_size)
      << FixItHint::CreateReplacement(OriginalSizeArg->getSourceRange(),
                                      OS.str());
}
9706
9707/// Check if two expressions refer to the same declaration.
9708static bool referToTheSameDecl(const Expr *E1, const Expr *E2) {
9709 if (const DeclRefExpr *D1 = dyn_cast_or_null<DeclRefExpr>(E1))
9710 if (const DeclRefExpr *D2 = dyn_cast_or_null<DeclRefExpr>(E2))
9711 return D1->getDecl() == D2->getDecl();
9712 return false;
9713}
9714
9715static const Expr *getStrlenExprArg(const Expr *E) {
9716 if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
9717 const FunctionDecl *FD = CE->getDirectCallee();
9718 if (!FD || FD->getMemoryFunctionKind() != Builtin::BIstrlen)
9719 return nullptr;
9720 return CE->getArg(0)->IgnoreParenCasts();
9721 }
9722 return nullptr;
9723}
9724
// Warn on anti-patterns as the 'size' argument to strncat.
// The correct size argument should look like following:
//   strncat(dst, src, sizeof(dst) - strlen(dest) - 1);
void Sema::CheckStrncatArguments(const CallExpr *CE,
                                 IdentifierInfo *FnName) {
  // Don't crash if the user has the wrong number of arguments.
  if (CE->getNumArgs() < 3)
    return;
  const Expr *DstArg = CE->getArg(0)->IgnoreParenCasts();
  const Expr *SrcArg = CE->getArg(1)->IgnoreParenCasts();
  const Expr *LenArg = CE->getArg(2)->IgnoreParenCasts();

  // Catch a comparison typo in the size slot first.
  if (CheckMemorySizeofForComparison(*this, LenArg, FnName, CE->getBeginLoc(),
                                     CE->getRParenLoc()))
    return;

  // Identify common expressions, which are wrongly used as the size argument
  // to strncat and may lead to buffer overflows.
  //   PatternType 1: size derived from the destination.
  //   PatternType 2: size derived from the source.
  unsigned PatternType = 0;
  if (const Expr *SizeOfArg = getSizeOfExprArg(LenArg)) {
    // - sizeof(dst)
    if (referToTheSameDecl(SizeOfArg, DstArg))
      PatternType = 1;
    // - sizeof(src)
    else if (referToTheSameDecl(SizeOfArg, SrcArg))
      PatternType = 2;
  } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(LenArg)) {
    if (BE->getOpcode() == BO_Sub) {
      const Expr *L = BE->getLHS()->IgnoreParenCasts();
      const Expr *R = BE->getRHS()->IgnoreParenCasts();
      // - sizeof(dst) - strlen(dst)
      if (referToTheSameDecl(DstArg, getSizeOfExprArg(L)) &&
          referToTheSameDecl(DstArg, getStrlenExprArg(R)))
        PatternType = 1;
      // - sizeof(src) - (anything)
      else if (referToTheSameDecl(SrcArg, getSizeOfExprArg(L)))
        PatternType = 2;
    }
  }

  if (PatternType == 0)
    return;

  // Generate the diagnostic.
  SourceLocation SL = LenArg->getBeginLoc();
  SourceRange SR = LenArg->getSourceRange();
  SourceManager &SM = getSourceManager();

  // If the function is defined as a builtin macro, do not show macro expansion.
  if (SM.isMacroArgExpansion(SL)) {
    SL = SM.getSpellingLoc(SL);
    SR = SourceRange(SM.getSpellingLoc(SR.getBegin()),
                     SM.getSpellingLoc(SR.getEnd()));
  }

  // Check if the destination is an array (rather than a pointer to an array).
  QualType DstTy = DstArg->getType();
  bool isKnownSizeArray = isConstantSizeArrayWithMoreThanOneElement(DstTy,
                                                                    Context);
  // Without a known destination array size we only warn; no fix-it is
  // offered below.
  if (!isKnownSizeArray) {
    if (PatternType == 1)
      Diag(SL, diag::warn_strncat_wrong_size) << SR;
    else
      Diag(SL, diag::warn_strncat_src_size) << SR;
    return;
  }

  if (PatternType == 1)
    Diag(SL, diag::warn_strncat_large_size) << SR;
  else
    Diag(SL, diag::warn_strncat_src_size) << SR;

  // Build the suggested replacement: "sizeof(dst) - strlen(dst) - 1".
  SmallString<128> sizeString;
  llvm::raw_svector_ostream OS(sizeString);
  OS << "sizeof(";
  DstArg->printPretty(OS, nullptr, getPrintingPolicy());
  OS << ") - ";
  OS << "strlen(";
  DstArg->printPretty(OS, nullptr, getPrintingPolicy());
  OS << ") - 1";

  Diag(SL, diag::note_strncat_wrong_size)
      << FixItHint::CreateReplacement(SR, OS.str());
}
9809
/// Perform semantic checks on a return value expression.
///
/// \param RetValExp the expression being returned.
/// \param lhsType the declared return type of the function/method.
/// \param ReturnLoc the location of the return statement, for diagnostics.
/// \param isObjCMethod true when returning from an Objective-C method.
/// \param Attrs the declaration's attributes, if any.
/// \param FD the enclosing function declaration, if returning from a
///        function (may be null).
void
Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
                         SourceLocation ReturnLoc,
                         bool isObjCMethod,
                         const AttrVec *Attrs,
                         const FunctionDecl *FD) {
  // Check if the return value is null but should not be, either because the
  // declaration carries returns_nonnull or the return type is non-null.
  if (((Attrs && hasSpecificAttr<ReturnsNonNullAttr>(*Attrs)) ||
       (!isObjCMethod && isNonNullType(Context, lhsType))) &&
      CheckNonNullExpr(*this, RetValExp))
    Diag(ReturnLoc, diag::warn_null_ret)
      << (isObjCMethod ? 1 : 0) << RetValExp->getSourceRange();

  // C++11 [basic.stc.dynamic.allocation]p4:
  //   If an allocation function declared with a non-throwing
  //   exception-specification fails to allocate storage, it shall return
  //   a null pointer. Any other allocation function that fails to allocate
  //   storage shall indicate failure only by throwing an exception [...]
  //
  // So warn when a potentially-throwing operator new/new[] returns null.
  if (FD) {
    OverloadedOperatorKind Op = FD->getOverloadedOperator();
    if (Op == OO_New || Op == OO_Array_New) {
      const FunctionProtoType *Proto
        = FD->getType()->castAs<FunctionProtoType>();
      if (!Proto->isNothrow(/*ResultIfDependent*/true) &&
          CheckNonNullExpr(*this, RetValExp))
        Diag(ReturnLoc, diag::warn_operator_new_returns_null)
          << FD << getLangOpts().CPlusPlus11;
    }
  }
}
9840
9841//===--- CHECK: Floating-Point comparisons (-Wfloat-equal) ---------------===//
9842
9843/// Check for comparisons of floating point operands using != and ==.
9844/// Issue a warning if these are no self-comparisons, as they are not likely
9845/// to do what the programmer intended.
9846void Sema::CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr *RHS) {
9847 Expr* LeftExprSansParen = LHS->IgnoreParenImpCasts();
9848 Expr* RightExprSansParen = RHS->IgnoreParenImpCasts();
9849
9850 // Special case: check for x == x (which is OK).
9851 // Do not emit warnings for such cases.
9852 if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen))
9853 if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(RightExprSansParen))
9854 if (DRL->getDecl() == DRR->getDecl())
9855 return;
9856
9857 // Special case: check for comparisons against literals that can be exactly
9858 // represented by APFloat. In such cases, do not emit a warning. This
9859 // is a heuristic: often comparison against such literals are used to
9860 // detect if a value in a variable has not changed. This clearly can
9861 // lead to false negatives.
9862 if (FloatingLiteral* FLL = dyn_cast<FloatingLiteral>(LeftExprSansParen)) {
9863 if (FLL->isExact())
9864 return;
9865 } else
9866 if (FloatingLiteral* FLR = dyn_cast<FloatingLiteral>(RightExprSansParen))
9867 if (FLR->isExact())
9868 return;
9869
9870 // Check for comparisons with builtin types.
9871 if (CallExpr* CL = dyn_cast<CallExpr>(LeftExprSansParen))
9872 if (CL->getBuiltinCallee())
9873 return;
9874
9875 if (CallExpr* CR = dyn_cast<CallExpr>(RightExprSansParen))
9876 if (CR->getBuiltinCallee())
9877 return;
9878
9879 // Emit the diagnostic.
9880 Diag(Loc, diag::warn_floatingpoint_eq)
9881 << LHS->getSourceRange() << RHS->getSourceRange();
9882}
9883
9884//===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===//
9885//===--- CHECK: Lossy implicit conversions (-Wconversion) --------------===//
9886
namespace {

/// Structure recording the 'active' range of an integer-valued
/// expression.
struct IntRange {
  /// The number of bits active in the int.
  unsigned Width;

  /// True if the int is known not to have negative values.
  bool NonNegative;

  IntRange(unsigned Width, bool NonNegative)
      : Width(Width), NonNegative(NonNegative) {}

  /// Returns the range of the bool type.
  static IntRange forBoolType() {
    return IntRange(1, true);
  }

  /// Returns the range of an opaque value of the given integral type.
  static IntRange forValueOfType(ASTContext &C, QualType T) {
    return forValueOfCanonicalType(C,
                          T->getCanonicalTypeInternal().getTypePtr());
  }

  /// Returns the range of an opaque value of a canonical integral type.
  static IntRange forValueOfCanonicalType(ASTContext &C, const Type *T) {
    assert(T->isCanonicalUnqualified());

    // Vector, complex and atomic types range over their element/value type.
    if (const VectorType *VT = dyn_cast<VectorType>(T))
      T = VT->getElementType().getTypePtr();
    if (const ComplexType *CT = dyn_cast<ComplexType>(T))
      T = CT->getElementType().getTypePtr();
    if (const AtomicType *AT = dyn_cast<AtomicType>(T))
      T = AT->getValueType().getTypePtr();

    if (!C.getLangOpts().CPlusPlus) {
      // For enum types in C code, use the underlying datatype.
      if (const EnumType *ET = dyn_cast<EnumType>(T))
        T = ET->getDecl()->getIntegerType().getDesugaredType(C).getTypePtr();
    } else if (const EnumType *ET = dyn_cast<EnumType>(T)) {
      // For enum types in C++, use the known bit width of the enumerators.
      EnumDecl *Enum = ET->getDecl();
      // In C++11, enums can have a fixed underlying type. Use this type to
      // compute the range.
      if (Enum->isFixed()) {
        return IntRange(C.getIntWidth(QualType(T, 0)),
                        !ET->isSignedIntegerOrEnumerationType());
      }

      unsigned NumPositive = Enum->getNumPositiveBits();
      unsigned NumNegative = Enum->getNumNegativeBits();

      // With no negative enumerators, the positive bit count suffices;
      // otherwise reserve a sign bit on top of the positive bits.
      if (NumNegative == 0)
        return IntRange(NumPositive, true/*NonNegative*/);
      else
        return IntRange(std::max(NumPositive + 1, NumNegative),
                        false/*NonNegative*/);
    }

    const BuiltinType *BT = cast<BuiltinType>(T);
    assert(BT->isInteger());

    return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger());
  }

  /// Returns the "target" range of a canonical integral type, i.e.
  /// the range of values expressible in the type.
  ///
  /// This matches forValueOfCanonicalType except that enums have the
  /// full range of their type, not the range of their enumerators.
  static IntRange forTargetOfCanonicalType(ASTContext &C, const Type *T) {
    assert(T->isCanonicalUnqualified());

    if (const VectorType *VT = dyn_cast<VectorType>(T))
      T = VT->getElementType().getTypePtr();
    if (const ComplexType *CT = dyn_cast<ComplexType>(T))
      T = CT->getElementType().getTypePtr();
    if (const AtomicType *AT = dyn_cast<AtomicType>(T))
      T = AT->getValueType().getTypePtr();
    // Unlike forValueOfCanonicalType, enums always contribute their full
    // underlying integer type here.
    if (const EnumType *ET = dyn_cast<EnumType>(T))
      T = C.getCanonicalType(ET->getDecl()->getIntegerType()).getTypePtr();

    const BuiltinType *BT = cast<BuiltinType>(T);
    assert(BT->isInteger());

    return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger());
  }

  /// Returns the supremum of two ranges: i.e. their conservative merge.
  /// (The wider width; non-negative only if both are.)
  static IntRange join(IntRange L, IntRange R) {
    return IntRange(std::max(L.Width, R.Width),
                    L.NonNegative && R.NonNegative);
  }

  /// Returns the infinum of two ranges: i.e. their aggressive merge.
  /// (The narrower width; non-negative if either is.)
  static IntRange meet(IntRange L, IntRange R) {
    return IntRange(std::min(L.Width, R.Width),
                    L.NonNegative || R.NonNegative);
  }
};

} // namespace
9990
9991static IntRange GetValueRange(ASTContext &C, llvm::APSInt &value,
9992 unsigned MaxWidth) {
9993 if (value.isSigned() && value.isNegative())
9994 return IntRange(value.getMinSignedBits(), false);
9995
9996 if (value.getBitWidth() > MaxWidth)
9997 value = value.trunc(MaxWidth);
9998
9999 // isNonNegative() just checks the sign bit without considering
10000 // signedness.
10001 return IntRange(value.getActiveBits(), true);
10002}
10003
10004static IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty,
10005 unsigned MaxWidth) {
10006 if (result.isInt())
10007 return GetValueRange(C, result.getInt(), MaxWidth);
10008
10009 if (result.isVector()) {
10010 IntRange R = GetValueRange(C, result.getVectorElt(0), Ty, MaxWidth);
10011 for (unsigned i = 1, e = result.getVectorLength(); i != e; ++i) {
10012 IntRange El = GetValueRange(C, result.getVectorElt(i), Ty, MaxWidth);
10013 R = IntRange::join(R, El);
10014 }
10015 return R;
10016 }
10017
10018 if (result.isComplexInt()) {
10019 IntRange R = GetValueRange(C, result.getComplexIntReal(), MaxWidth);
10020 IntRange I = GetValueRange(C, result.getComplexIntImag(), MaxWidth);
10021 return IntRange::join(R, I);
10022 }
10023
10024 // This can happen with lossless casts to intptr_t of "based" lvalues.
10025 // Assume it might use arbitrary bits.
10026 // FIXME: The only reason we need to pass the type in here is to get
10027 // the sign right on this one case. It would be nice if APValue
10028 // preserved this.
10029 assert(result.isLValue() || result.isAddrLabelDiff());
10030 return IntRange(MaxWidth, Ty->isUnsignedIntegerOrEnumerationType());
10031}
10032
10033static QualType GetExprType(const Expr *E) {
10034 QualType Ty = E->getType();
10035 if (const AtomicType *AtomicRHS = Ty->getAs<AtomicType>())
10036 Ty = AtomicRHS->getValueType();
10037 return Ty;
10038}
10039
/// Pseudo-evaluate the given integer expression, estimating the
/// range of values it might take.
///
/// The estimate is conservative: the real set of values is always contained
/// in the returned range, but the range may be wider than necessary.
///
/// \param C the ASTContext used for constant folding and type queries
/// \param E the integer expression to analyze (parentheses are ignored)
/// \param MaxWidth - the width to which the value will be truncated
static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth) {
  E = E->IgnoreParens();

  // Try a full evaluation first.
  Expr::EvalResult result;
  if (E->EvaluateAsRValue(result, C)) {
    // [u]intcap_t values produce LValues that can be used as pointers
    // (a null base plus an offset); fold such a value down to a plain
    // integer built from the offset so GetValueRange can handle it.
    if (result.Val.isLValue() && result.Val.getLValueBase().isNull()) {
      result.Val =
          APValue(llvm::APSInt(result.Val.getLValueOffset().getQuantity()));
    }
    return GetValueRange(C, result.Val, GetExprType(E), MaxWidth);
  }

  // I think we only want to look through implicit casts here; if the
  // user has an explicit widening cast, we should treat the value as
  // being of the new, wider type.
  if (const auto *CE = dyn_cast<ImplicitCastExpr>(E)) {
    // No-op and lvalue-to-rvalue casts don't change the value range.
    if (CE->getCastKind() == CK_NoOp || CE->getCastKind() == CK_LValueToRValue)
      return GetExprRange(C, CE->getSubExpr(), MaxWidth);

    IntRange OutputTypeRange = IntRange::forValueOfType(C, GetExprType(CE));

    bool isIntegerCast = CE->getCastKind() == CK_IntegralCast ||
                         CE->getCastKind() == CK_BooleanToSignedIntegral;

    // Assume that non-integer casts can span the full range of the type.
    if (!isIntegerCast)
      return OutputTypeRange;

    IntRange SubRange
      = GetExprRange(C, CE->getSubExpr(),
                     std::min(MaxWidth, OutputTypeRange.Width));

    // Bail out if the subexpr's range is as wide as the cast type.
    if (SubRange.Width >= OutputTypeRange.Width)
      return OutputTypeRange;

    // Otherwise, we take the smaller width, and we're non-negative if
    // either the output type or the subexpr is.
    return IntRange(SubRange.Width,
                    SubRange.NonNegative || OutputTypeRange.NonNegative);
  }

  if (const auto *CO = dyn_cast<ConditionalOperator>(E)) {
    // If we can fold the condition, just take that operand.
    bool CondResult;
    if (CO->getCond()->EvaluateAsBooleanCondition(CondResult, C))
      return GetExprRange(C, CondResult ? CO->getTrueExpr()
                                        : CO->getFalseExpr(),
                          MaxWidth);

    // Otherwise, conservatively merge.
    IntRange L = GetExprRange(C, CO->getTrueExpr(), MaxWidth);
    IntRange R = GetExprRange(C, CO->getFalseExpr(), MaxWidth);
    return IntRange::join(L, R);
  }

  if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
    switch (BO->getOpcode()) {
    case BO_Cmp:
      llvm_unreachable("builtin <=> should have class type");

    // Boolean-valued operations are single-bit and positive.
    case BO_LAnd:
    case BO_LOr:
    case BO_LT:
    case BO_GT:
    case BO_LE:
    case BO_GE:
    case BO_EQ:
    case BO_NE:
      return IntRange::forBoolType();

    // The type of the assignments is the type of the LHS, so the RHS
    // is not necessarily the same type.
    case BO_MulAssign:
    case BO_DivAssign:
    case BO_RemAssign:
    case BO_AddAssign:
    case BO_SubAssign:
    case BO_XorAssign:
    case BO_OrAssign:
      // TODO: bitfields?
      return IntRange::forValueOfType(C, GetExprType(E));

    // Simple assignments just pass through the RHS, which will have
    // been coerced to the LHS type.
    case BO_Assign:
      // TODO: bitfields?
      return GetExprRange(C, BO->getRHS(), MaxWidth);

    // Operations with opaque sources are black-listed.
    case BO_PtrMemD:
    case BO_PtrMemI:
      return IntRange::forValueOfType(C, GetExprType(E));

    // Bitwise-and uses the *infimum* of the two source ranges.
    case BO_And:
    case BO_AndAssign:
      return IntRange::meet(GetExprRange(C, BO->getLHS(), MaxWidth),
                            GetExprRange(C, BO->getRHS(), MaxWidth));

    // Left shift gets black-listed based on a judgement call.
    case BO_Shl:
      // ...except that we want to treat '1 << (blah)' as logically
      // positive.  It's an important idiom.
      if (IntegerLiteral *I
            = dyn_cast<IntegerLiteral>(BO->getLHS()->IgnoreParenCasts())) {
        if (I->getValue() == 1) {
          IntRange R = IntRange::forValueOfType(C, GetExprType(E));
          return IntRange(R.Width, /*NonNegative*/ true);
        }
      }
      LLVM_FALLTHROUGH;

    case BO_ShlAssign:
      return IntRange::forValueOfType(C, GetExprType(E));

    // Right shift by a constant can narrow its left argument.
    case BO_Shr:
    case BO_ShrAssign: {
      IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth);

      // If the shift amount is a positive constant, drop the width by
      // that much.
      llvm::APSInt shift;
      if (BO->getRHS()->isIntegerConstantExpr(shift, C) &&
          shift.isNonNegative()) {
        unsigned zext = shift.getZExtValue();
        // A shift of L.Width or more leaves at most the sign bit.
        if (zext >= L.Width)
          L.Width = (L.NonNegative ? 0 : 1);
        else
          L.Width -= zext;
      }

      return L;
    }

    // Comma acts as its right operand.
    case BO_Comma:
      return GetExprRange(C, BO->getRHS(), MaxWidth);

    // Black-list pointer subtractions.
    case BO_Sub:
      if (BO->getLHS()->getType()->isPointerType())
        return IntRange::forValueOfType(C, GetExprType(E));
      break;

    // The width of a division result is mostly determined by the size
    // of the LHS.
    case BO_Div: {
      // Don't 'pre-truncate' the operands.
      unsigned opWidth = C.getIntWidth(GetExprType(E));
      IntRange L = GetExprRange(C, BO->getLHS(), opWidth);

      // If the divisor is constant, use that.
      llvm::APSInt divisor;
      if (BO->getRHS()->isIntegerConstantExpr(divisor, C)) {
        unsigned log2 = divisor.logBase2(); // floor(log_2(divisor))
        // Dividing by 2^log2 removes log2 bits from the result's width.
        if (log2 >= L.Width)
          L.Width = (L.NonNegative ? 0 : 1);
        else
          L.Width = std::min(L.Width - log2, MaxWidth);
        return L;
      }

      // Otherwise, just use the LHS's width.
      // The result is non-negative only if both operands are.
      IntRange R = GetExprRange(C, BO->getRHS(), opWidth);
      return IntRange(L.Width, L.NonNegative && R.NonNegative);
    }

    // The result of a remainder can't be larger than the result of
    // either side.
    case BO_Rem: {
      // Don't 'pre-truncate' the operands.
      unsigned opWidth = C.getIntWidth(GetExprType(E));
      IntRange L = GetExprRange(C, BO->getLHS(), opWidth);
      IntRange R = GetExprRange(C, BO->getRHS(), opWidth);

      IntRange meet = IntRange::meet(L, R);
      meet.Width = std::min(meet.Width, MaxWidth);
      return meet;
    }

    // The default behavior is okay for these.
    case BO_Mul:
    case BO_Add:
    case BO_Xor:
    case BO_Or:
      break;
    }

    // The default case is to treat the operation as if it were closed
    // on the narrowest type that encompasses both operands.
    IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth);
    IntRange R = GetExprRange(C, BO->getRHS(), MaxWidth);
    return IntRange::join(L, R);
  }

  if (const auto *UO = dyn_cast<UnaryOperator>(E)) {
    switch (UO->getOpcode()) {
    // Boolean-valued operations are white-listed.
    case UO_LNot:
      return IntRange::forBoolType();

    // Operations with opaque sources are black-listed.
    case UO_Deref:
    case UO_AddrOf: // should be impossible
      return IntRange::forValueOfType(C, GetExprType(E));

    default:
      return GetExprRange(C, UO->getSubExpr(), MaxWidth);
    }
  }

  if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E))
    return GetExprRange(C, OVE->getSourceExpr(), MaxWidth);

  // A bit-field read is bounded by the declared bit-field width.
  if (const auto *BitField = E->getSourceBitField())
    return IntRange(BitField->getBitWidthValue(C),
                    BitField->getType()->isUnsignedIntegerOrEnumerationType());

  // Fall back to the full range of the expression's type.
  return IntRange::forValueOfType(C, GetExprType(E));
}
10269
10270static IntRange GetExprRange(ASTContext &C, const Expr *E) {
10271 QualType Ty = GetExprType(E);
10272 if (auto BT = dyn_cast<BuiltinType>(Ty.getCanonicalType())) {
10273 if (BT->getKind() == BuiltinType::IntCap)
10274 Ty = C.LongTy;
10275 else if (BT->getKind() == BuiltinType::UIntCap)
10276 Ty = C.UnsignedLongTy;
10277 }
10278 return GetExprRange(C, E, C.getIntWidth(Ty));
10279}
10280
10281/// Checks whether the given value, which currently has the given
10282/// source semantics, has the same value when coerced through the
10283/// target semantics.
10284static bool IsSameFloatAfterCast(const llvm::APFloat &value,
10285 const llvm::fltSemantics &Src,
10286 const llvm::fltSemantics &Tgt) {
10287 llvm::APFloat truncated = value;
10288
10289 bool ignored;
10290 truncated.convert(Src, llvm::APFloat::rmNearestTiesToEven, &ignored);
10291 truncated.convert(Tgt, llvm::APFloat::rmNearestTiesToEven, &ignored);
10292
10293 return truncated.bitwiseIsEqual(value);
10294}
10295
10296/// Checks whether the given value, which currently has the given
10297/// source semantics, has the same value when coerced through the
10298/// target semantics.
10299///
10300/// The value might be a vector of floats (or a complex number).
10301static bool IsSameFloatAfterCast(const APValue &value,
10302 const llvm::fltSemantics &Src,
10303 const llvm::fltSemantics &Tgt) {
10304 if (value.isFloat())
10305 return IsSameFloatAfterCast(value.getFloat(), Src, Tgt);
10306
10307 if (value.isVector()) {
10308 for (unsigned i = 0, e = value.getVectorLength(); i != e; ++i)
10309 if (!IsSameFloatAfterCast(value.getVectorElt(i), Src, Tgt))
10310 return false;
10311 return true;
10312 }
10313
10314 assert(value.isComplexFloat());
10315 return (IsSameFloatAfterCast(value.getComplexFloatReal(), Src, Tgt) &&
10316 IsSameFloatAfterCast(value.getComplexFloatImag(), Src, Tgt));
10317}
10318
10319static void AnalyzeImplicitConversions(Sema &S, Expr *E, SourceLocation CC);
10320
10321static bool IsEnumConstOrFromMacro(Sema &S, Expr *E) {
10322 // Suppress cases where we are comparing against an enum constant.
10323 if (const DeclRefExpr *DR =
10324 dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts()))
10325 if (isa<EnumConstantDecl>(DR->getDecl()))
10326 return true;
10327
10328 // Suppress cases where the '0' value is expanded from a macro.
10329 if (E->getBeginLoc().isMacroID())
10330 return true;
10331
10332 return false;
10333}
10334
10335static bool isKnownToHaveUnsignedValue(Expr *E) {
10336 return E->getType()->isIntegerType() &&
10337 (!E->getType()->isSignedIntegerType() ||
10338 !E->IgnoreParenImpCasts()->getType()->isSignedIntegerType());
10339}
10340
namespace {
/// The promoted range of values of a type. In general this has the
/// following structure:
///
/// |-----------| . . . |-----------|
/// ^ ^ ^ ^
/// Min HoleMin HoleMax Max
///
/// ... where there is only a hole if a signed type is promoted to unsigned
/// (in which case Min and Max are the smallest and largest representable
/// values).
struct PromotedRange {
  // Min, or HoleMax if there is a hole.
  llvm::APSInt PromotedMin;
  // Max, or HoleMin if there is a hole.
  llvm::APSInt PromotedMax;

  /// Build the promoted range for values described by \p R once they are
  /// converted to an integer type of \p BitWidth bits with the given
  /// signedness.
  PromotedRange(IntRange R, unsigned BitWidth, bool Unsigned) {
    if (R.Width == 0)
      PromotedMin = PromotedMax = llvm::APSInt(BitWidth, Unsigned);
    else if (R.Width >= BitWidth && !Unsigned) {
      // Promotion made the type *narrower*. This happens when promoting
      // a < 32-bit unsigned / <= 32-bit signed bit-field to 'signed int'.
      // Treat all values of 'signed int' as being in range for now.
      PromotedMin = llvm::APSInt::getMinValue(BitWidth, Unsigned);
      PromotedMax = llvm::APSInt::getMaxValue(BitWidth, Unsigned);
    } else {
      // Extend (or truncate) the source range's bounds to the promoted
      // width, then adopt the promoted signedness.
      PromotedMin = llvm::APSInt::getMinValue(R.Width, R.NonNegative)
                        .extOrTrunc(BitWidth);
      PromotedMin.setIsUnsigned(Unsigned);

      PromotedMax = llvm::APSInt::getMaxValue(R.Width, R.NonNegative)
                        .extOrTrunc(BitWidth);
      PromotedMax.setIsUnsigned(Unsigned);
    }
  }

  // Determine whether this range is contiguous (has no hole).
  bool isContiguous() const { return PromotedMin <= PromotedMax; }

  // Where a constant value is within the range.
  // These are bit-flags so a single result can encode several relations
  // (e.g. Less == LE | LT | NE); see constantValue() below.
  enum ComparisonResult {
    LT = 0x1,
    LE = 0x2,
    GT = 0x4,
    GE = 0x8,
    EQ = 0x10,
    NE = 0x20,
    InRangeFlag = 0x40,

    Less = LE | LT | NE,
    Min = LE | InRangeFlag,
    InRange = InRangeFlag,
    Max = GE | InRangeFlag,
    Greater = GE | GT | NE,

    OnlyValue = LE | GE | EQ | InRangeFlag,
    InHole = NE
  };

  /// Classify \p Value against this range.
  /// \pre \p Value has the same bit-width and signedness as the range.
  ComparisonResult compare(const llvm::APSInt &Value) const {
    assert(Value.getBitWidth() == PromotedMin.getBitWidth() &&
           Value.isUnsigned() == PromotedMin.isUnsigned());
    if (!isContiguous()) {
      // Discontiguous ranges wrap around: values at or above PromotedMin
      // or at or below PromotedMax are inside; the gap in between is the
      // hole.
      assert(Value.isUnsigned() && "discontiguous range for signed compare");
      if (Value.isMinValue()) return Min;
      if (Value.isMaxValue()) return Max;
      if (Value >= PromotedMin) return InRange;
      if (Value <= PromotedMax) return InRange;
      return InHole;
    }

    switch (llvm::APSInt::compareValues(Value, PromotedMin)) {
    case -1: return Less;
    case 0: return PromotedMin == PromotedMax ? OnlyValue : Min;
    case 1:
      switch (llvm::APSInt::compareValues(Value, PromotedMax)) {
      case -1: return InRange;
      case 0: return Max;
      case 1: return Greater;
      }
    }

    llvm_unreachable("impossible compare result");
  }

  /// If the comparison \p Op between a constant (classified as \p R against
  /// the other operand's range) and the other operand always has the same
  /// result, return a printable form of that constant result; otherwise
  /// return None.
  static llvm::Optional<StringRef>
  constantValue(BinaryOperatorKind Op, ComparisonResult R, bool ConstantOnRHS) {
    if (Op == BO_Cmp) {
      // <=> yields one of the strong_ordering constants.
      ComparisonResult LTFlag = LT, GTFlag = GT;
      if (ConstantOnRHS) std::swap(LTFlag, GTFlag);

      if (R & EQ) return StringRef("'std::strong_ordering::equal'");
      if (R & LTFlag) return StringRef("'std::strong_ordering::less'");
      if (R & GTFlag) return StringRef("'std::strong_ordering::greater'");
      return llvm::None;
    }

    // Map the operator to the flags that would make it evaluate to true
    // or false, accounting for which side the constant is on.
    ComparisonResult TrueFlag, FalseFlag;
    if (Op == BO_EQ) {
      TrueFlag = EQ;
      FalseFlag = NE;
    } else if (Op == BO_NE) {
      TrueFlag = NE;
      FalseFlag = EQ;
    } else {
      if ((Op == BO_LT || Op == BO_GE) ^ ConstantOnRHS) {
        TrueFlag = LT;
        FalseFlag = GE;
      } else {
        TrueFlag = GT;
        FalseFlag = LE;
      }
      if (Op == BO_GE || Op == BO_LE)
        std::swap(TrueFlag, FalseFlag);
    }
    if (R & TrueFlag)
      return StringRef("true");
    if (R & FalseFlag)
      return StringRef("false");
    return llvm::None;
  }
};
} // namespace
10465
10466static bool HasEnumType(Expr *E) {
10467 // Strip off implicit integral promotions.
10468 while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
10469 if (ICE->getCastKind() != CK_IntegralCast &&
10470 ICE->getCastKind() != CK_NoOp)
10471 break;
10472 E = ICE->getSubExpr();
10473 }
10474
10475 return E->getType()->isEnumeralType();
10476}
10477
10478static int classifyConstantValue(Expr *Constant) {
10479 // The values of this enumeration are used in the diagnostics
10480 // diag::warn_out_of_range_compare and diag::warn_tautological_bool_compare.
10481 enum ConstantValueKind {
10482 Miscellaneous = 0,
10483 LiteralTrue,
10484 LiteralFalse
10485 };
10486 if (auto *BL = dyn_cast<CXXBoolLiteralExpr>(Constant))
10487 return BL->getValue() ? ConstantValueKind::LiteralTrue
10488 : ConstantValueKind::LiteralFalse;
10489 return ConstantValueKind::Miscellaneous;
10490}
10491
/// Check whether the comparison \p E between \p Constant and \p Other always
/// evaluates to the same result, and emit a tautological-comparison
/// diagnostic if so.
///
/// \param Value the folded value of \p Constant
/// \param RhsConstant true if the constant operand is on the RHS of \p E
/// \returns true if a diagnostic was emitted.
static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E,
                                        Expr *Constant, Expr *Other,
                                        const llvm::APSInt &Value,
                                        bool RhsConstant) {
  if (S.inTemplateInstantiation())
    return false;

  // Keep the original (implicitly converted) operand around for the
  // unsigned-always-true diagnostics below.
  Expr *OriginalOther = Other;

  Constant = Constant->IgnoreParenImpCasts();
  Other = Other->IgnoreParenImpCasts();

  // Suppress warnings on tautological comparisons between values of the same
  // enumeration type. There are only two ways we could warn on this:
  // - If the constant is outside the range of representable values of
  // the enumeration. In such a case, we should warn about the cast
  // to enumeration type, not about the comparison.
  // - If the constant is the maximum / minimum in-range value. For an
  // enumeration type, such comparisons can be meaningful and useful.
  if (Constant->getType()->isEnumeralType() &&
      S.Context.hasSameUnqualifiedType(Constant->getType(), Other->getType()))
    return false;

  // TODO: Investigate using GetExprRange() to get tighter bounds
  // on the bit ranges.
  QualType OtherT = Other->getType();
  if (const auto *AT = OtherT->getAs<AtomicType>())
    OtherT = AT->getValueType();
  IntRange OtherRange = IntRange::forValueOfType(S.Context, OtherT);

  // Whether we're treating Other as being a bool because of the form of
  // expression despite it having another type (typically 'int' in C).
  bool OtherIsBooleanDespiteType =
      !OtherT->isBooleanType() && Other->isKnownToHaveBooleanValue();
  if (OtherIsBooleanDespiteType)
    OtherRange = IntRange::forBoolType();

  // Determine the promoted range of the other type and see if a comparison of
  // the constant against that range is tautological.
  PromotedRange OtherPromotedRange(OtherRange, Value.getBitWidth(),
                                   Value.isUnsigned());
  auto Cmp = OtherPromotedRange.compare(Value);
  auto Result = PromotedRange::constantValue(E->getOpcode(), Cmp, RhsConstant);
  if (!Result)
    return false;

  // Suppress the diagnostic for an in-range comparison if the constant comes
  // from a macro or enumerator. We don't want to diagnose
  //
  // some_long_value <= INT_MAX
  //
  // when sizeof(int) == sizeof(long).
  bool InRange = Cmp & PromotedRange::InRangeFlag;
  if (InRange && IsEnumConstOrFromMacro(S, Constant))
    return false;

  // If this is a comparison to an enum constant, include that
  // constant in the diagnostic.
  const EnumConstantDecl *ED = nullptr;
  if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Constant))
    ED = dyn_cast<EnumConstantDecl>(DR->getDecl());

  // Should be enough for uint128 (39 decimal digits)
  SmallString<64> PrettySourceValue;
  llvm::raw_svector_ostream OS(PrettySourceValue);
  if (ED)
    OS << '\'' << *ED << "' (" << Value << ")";
  else
    OS << Value;

  // FIXME: We use a somewhat different formatting for the in-range cases and
  // cases involving boolean values for historical reasons. We should pick a
  // consistent way of presenting these diagnostics.
  if (!InRange || Other->isKnownToHaveBooleanValue()) {
    S.DiagRuntimeBehavior(
        E->getOperatorLoc(), E,
        S.PDiag(!InRange ? diag::warn_out_of_range_compare
                         : diag::warn_tautological_bool_compare)
            << OS.str() << classifyConstantValue(Constant)
            << OtherT << OtherIsBooleanDespiteType << *Result
            << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange());
  } else {
    // Pick a more specific diagnostic for 'unsigned < 0'-style comparisons.
    unsigned Diag = (isKnownToHaveUnsignedValue(OriginalOther) && Value == 0)
                        ? (HasEnumType(OriginalOther)
                               ? diag::warn_unsigned_enum_always_true_comparison
                               : diag::warn_unsigned_always_true_comparison)
                        : diag::warn_tautological_constant_compare;

    S.Diag(E->getOperatorLoc(), Diag)
        << RhsConstant << OtherT << E->getOpcodeStr() << OS.str() << *Result
        << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange();
  }

  return true;
}
10587
10588/// Analyze the operands of the given comparison. Implements the
10589/// fallback case from AnalyzeComparison.
10590static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) {
10591 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());
10592 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());
10593}
10594
/// Implements -Wsign-compare.
///
/// Also dispatches the tautological-comparison check for comparisons where
/// exactly one side folds to an integer constant.
///
/// \param E the binary operator to check for warnings
static void AnalyzeComparison(Sema &S, BinaryOperator *E) {
  // The type the comparison is being performed in.
  QualType T = E->getLHS()->getType();

  // Only analyze comparison operators where both sides have been converted to
  // the same type.
  if (!S.Context.hasSameUnqualifiedType(T, E->getRHS()->getType()))
    return AnalyzeImpConvsInComparison(S, E);

  // Don't analyze value-dependent comparisons directly.
  if (E->isValueDependent())
    return AnalyzeImpConvsInComparison(S, E);

  Expr *LHS = E->getLHS();
  Expr *RHS = E->getRHS();

  if (T->isIntegralType(S.Context)) {
    llvm::APSInt RHSValue;
    llvm::APSInt LHSValue;

    bool IsRHSIntegralLiteral = RHS->isIntegerConstantExpr(RHSValue, S.Context);
    bool IsLHSIntegralLiteral = LHS->isIntegerConstantExpr(LHSValue, S.Context);

    // We don't care about expressions whose result is a constant.
    if (IsRHSIntegralLiteral && IsLHSIntegralLiteral)
      return AnalyzeImpConvsInComparison(S, E);

    // We only care about expressions where just one side is literal
    if (IsRHSIntegralLiteral ^ IsLHSIntegralLiteral) {
      // Is the constant on the RHS or LHS?
      const bool RhsConstant = IsRHSIntegralLiteral;
      Expr *Const = RhsConstant ? RHS : LHS;
      Expr *Other = RhsConstant ? LHS : RHS;
      const llvm::APSInt &Value = RhsConstant ? RHSValue : LHSValue;

      // Check whether an integer constant comparison results in a value
      // of 'true' or 'false'.
      if (CheckTautologicalComparison(S, E, Const, Other, Value, RhsConstant))
        return AnalyzeImpConvsInComparison(S, E);
    }
  }

  if (!T->hasUnsignedIntegerRepresentation()) {
    // We don't do anything special if this isn't an unsigned integral
    // comparison: we're only interested in integral comparisons, and
    // signed comparisons only happen in cases we don't care to warn about.
    return AnalyzeImpConvsInComparison(S, E);
  }

  LHS = LHS->IgnoreParenImpCasts();
  RHS = RHS->IgnoreParenImpCasts();

  if (!S.getLangOpts().CPlusPlus) {
    // Avoid warning about comparison of integers with different signs when
    // RHS/LHS has a `typeof(E)` type whose sign is different from the sign of
    // the type of `E`.
    if (const auto *TET = dyn_cast<TypeOfExprType>(LHS->getType()))
      LHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts();
    if (const auto *TET = dyn_cast<TypeOfExprType>(RHS->getType()))
      RHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts();
  }

  // Check to see if one of the (unmodified) operands is of different
  // signedness.
  Expr *signedOperand, *unsignedOperand;
  if (LHS->getType()->hasSignedIntegerRepresentation()) {
    assert(!RHS->getType()->hasSignedIntegerRepresentation() &&
           "unsigned comparison between two signed integer expressions?");
    signedOperand = LHS;
    unsignedOperand = RHS;
  } else if (RHS->getType()->hasSignedIntegerRepresentation()) {
    signedOperand = RHS;
    unsignedOperand = LHS;
  } else {
    // Neither side is signed before conversion; nothing to warn about.
    return AnalyzeImpConvsInComparison(S, E);
  }

  // Otherwise, calculate the effective range of the signed operand.
  IntRange signedRange = GetExprRange(S.Context, signedOperand);

  // Go ahead and analyze implicit conversions in the operands. Note
  // that we skip the implicit conversions on both sides.
  AnalyzeImplicitConversions(S, LHS, E->getOperatorLoc());
  AnalyzeImplicitConversions(S, RHS, E->getOperatorLoc());

  // If the signed range is non-negative, -Wsign-compare won't fire.
  if (signedRange.NonNegative)
    return;

  // For (in)equality comparisons, if the unsigned operand is a
  // constant which cannot collide with a overflowed signed operand,
  // then reinterpreting the signed operand as unsigned will not
  // change the result of the comparison.
  if (E->isEqualityOp()) {
    unsigned comparisonWidth = S.Context.getIntWidth(T);
    IntRange unsignedRange = GetExprRange(S.Context, unsignedOperand);

    // We should never be unable to prove that the unsigned operand is
    // non-negative.
    assert(unsignedRange.NonNegative && "unsigned range includes negative?");

    if (unsignedRange.Width < comparisonWidth)
      return;
  }

  S.DiagRuntimeBehavior(E->getOperatorLoc(), E,
                        S.PDiag(diag::warn_mixed_sign_comparison)
                            << LHS->getType() << RHS->getType()
                            << LHS->getSourceRange() << RHS->getSourceRange());
}
10708
10709/// Analyzes an attempt to assign the given value to a bitfield.
10710///
10711/// Returns true if there was something fishy about the attempt.
10712static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
10713 SourceLocation InitLoc) {
10714 assert(Bitfield->isBitField());
10715 if (Bitfield->isInvalidDecl())
10716 return false;
10717
10718 // White-list bool bitfields.
10719 QualType BitfieldType = Bitfield->getType();
10720 if (BitfieldType->isBooleanType())
10721 return false;
10722
10723 if (BitfieldType->isEnumeralType()) {
10724 EnumDecl *BitfieldEnumDecl = BitfieldType->getAs<EnumType>()->getDecl();
10725 // If the underlying enum type was not explicitly specified as an unsigned
10726 // type and the enum contain only positive values, MSVC++ will cause an
10727 // inconsistency by storing this as a signed type.
10728 if (S.getLangOpts().CPlusPlus11 &&
10729 !BitfieldEnumDecl->getIntegerTypeSourceInfo() &&
10730 BitfieldEnumDecl->getNumPositiveBits() > 0 &&
10731 BitfieldEnumDecl->getNumNegativeBits() == 0) {
10732 S.Diag(InitLoc, diag::warn_no_underlying_type_specified_for_enum_bitfield)
10733 << BitfieldEnumDecl->getNameAsString();
10734 }
10735 }
10736
10737 if (Bitfield->getType()->isBooleanType())
10738 return false;
10739
10740 // Ignore value- or type-dependent expressions.
10741 if (Bitfield->getBitWidth()->isValueDependent() ||
10742 Bitfield->getBitWidth()->isTypeDependent() ||
10743 Init->isValueDependent() ||
10744 Init->isTypeDependent())
10745 return false;
10746
10747 Expr *OriginalInit = Init->IgnoreParenImpCasts();
10748 unsigned FieldWidth = Bitfield->getBitWidthValue(S.Context);
10749
10750 Expr::EvalResult Result;
10751 if (!OriginalInit->EvaluateAsInt(Result, S.Context,
10752 Expr::SE_AllowSideEffects)) {
10753 // The RHS is not constant. If the RHS has an enum type, make sure the
10754 // bitfield is wide enough to hold all the values of the enum without
10755 // truncation.
10756 if (const auto *EnumTy = OriginalInit->getType()->getAs<EnumType>()) {
10757 EnumDecl *ED = EnumTy->getDecl();
10758 bool SignedBitfield = BitfieldType->isSignedIntegerType();
10759
10760 // Enum types are implicitly signed on Windows, so check if there are any
10761 // negative enumerators to see if the enum was intended to be signed or
10762 // not.
10763 bool SignedEnum = ED->getNumNegativeBits() > 0;
10764
10765 // Check for surprising sign changes when assigning enum values to a
10766 // bitfield of different signedness. If the bitfield is signed and we
10767 // have exactly the right number of bits to store this unsigned enum,
10768 // suggest changing the enum to an unsigned type. This typically happens
10769 // on Windows where unfixed enums always use an underlying type of 'int'.
10770 unsigned DiagID = 0;
10771 if (SignedEnum && !SignedBitfield) {
10772 DiagID = diag::warn_unsigned_bitfield_assigned_signed_enum;
10773 } else if (SignedBitfield && !SignedEnum &&
10774 ED->getNumPositiveBits() == FieldWidth) {
10775 DiagID = diag::warn_signed_bitfield_enum_conversion;
10776 }
10777
10778 if (DiagID) {
10779 S.Diag(InitLoc, DiagID) << Bitfield << ED;
10780 TypeSourceInfo *TSI = Bitfield->getTypeSourceInfo();
10781 SourceRange TypeRange =
10782 TSI ? TSI->getTypeLoc().getSourceRange() : SourceRange();
10783 S.Diag(Bitfield->getTypeSpecStartLoc(), diag::note_change_bitfield_sign)
10784 << SignedEnum << TypeRange;
10785 }
10786
10787 // Compute the required bitwidth. If the enum has negative values, we need
10788 // one more bit than the normal number of positive bits to represent the
10789 // sign bit.
10790 unsigned BitsNeeded = SignedEnum ? std::max(ED->getNumPositiveBits() + 1,
10791 ED->getNumNegativeBits())
10792 : ED->getNumPositiveBits();
10793
10794 // Check the bitwidth.
10795 if (BitsNeeded > FieldWidth) {
10796 Expr *WidthExpr = Bitfield->getBitWidth();
10797 S.Diag(InitLoc, diag::warn_bitfield_too_small_for_enum)
10798 << Bitfield << ED;
10799 S.Diag(WidthExpr->getExprLoc(), diag::note_widen_bitfield)
10800 << BitsNeeded << ED << WidthExpr->getSourceRange();
10801 }
10802 }
10803
10804 return false;
10805 }
10806
10807 llvm::APSInt Value = Result.Val.getInt();
10808
10809 unsigned OriginalWidth = Value.getBitWidth();
10810
10811 if (!Value.isSigned() || Value.isNegative())
10812 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(OriginalInit))
10813 if (UO->getOpcode() == UO_Minus || UO->getOpcode() == UO_Not)
10814 OriginalWidth = Value.getMinSignedBits();
10815
10816 if (OriginalWidth <= FieldWidth)
10817 return false;
10818
10819 // Compute the value which the bitfield will contain.
10820 llvm::APSInt TruncatedValue = Value.trunc(FieldWidth);
10821 TruncatedValue.setIsSigned(BitfieldType->isSignedIntegerType());
10822
10823 // Check whether the stored value is equal to the original value.
10824 TruncatedValue = TruncatedValue.extend(OriginalWidth);
10825 if (llvm::APSInt::isSameValue(Value, TruncatedValue))
10826 return false;
10827
10828 // Special-case bitfields of width 1: booleans are naturally 0/1, and
10829 // therefore don't strictly fit into a signed bitfield of width 1.
10830 if (FieldWidth == 1 && Value == 1)
10831 return false;
10832
10833 std::string PrettyValue = Value.toString(10);
10834 std::string PrettyTrunc = TruncatedValue.toString(10);
10835
10836 S.Diag(InitLoc, diag::warn_impcast_bitfield_precision_constant)
10837 << PrettyValue << PrettyTrunc << OriginalInit->getType()
10838 << Init->getSourceRange();
10839
10840 return true;
10841}
10842
10843/// Analyze the given simple or compound assignment for warning-worthy
10844/// operations.
10845static void AnalyzeAssignment(Sema &S, BinaryOperator *E) {
10846 // Just recurse on the LHS.
10847 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());
10848
10849 // We want to recurse on the RHS as normal unless we're assigning to
10850 // a bitfield.
10851 if (FieldDecl *Bitfield = E->getLHS()->getSourceBitField()) {
10852 if (AnalyzeBitFieldAssignment(S, Bitfield, E->getRHS(),
10853 E->getOperatorLoc())) {
10854 // Recurse, ignoring any implicit conversions on the RHS.
10855 return AnalyzeImplicitConversions(S, E->getRHS()->IgnoreParenImpCasts(),
10856 E->getOperatorLoc());
10857 }
10858 }
10859
10860 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());
10861
10862 // Diagnose implicitly sequentially-consistent atomic assignment.
10863 if (E->getLHS()->getType()->isAtomicType())
10864 S.Diag(E->getRHS()->getBeginLoc(), diag::warn_atomic_implicit_seq_cst);
10865}
10866
10867/// Diagnose an implicit cast; purely a helper for CheckImplicitConversion.
10868static void DiagnoseImpCast(Sema &S, Expr *E, QualType SourceType, QualType T,
10869 SourceLocation CContext, unsigned diag,
10870 bool pruneControlFlow = false) {
10871 if (pruneControlFlow) {
10872 S.DiagRuntimeBehavior(E->getExprLoc(), E,
10873 S.PDiag(diag)
10874 << SourceType << T << E->getSourceRange()
10875 << SourceRange(CContext));
10876 return;
10877 }
10878 S.Diag(E->getExprLoc(), diag)
10879 << SourceType << T << E->getSourceRange() << SourceRange(CContext);
10880}
10881
/// Diagnose an implicit cast; purely a helper for CheckImplicitConversion.
/// Convenience overload that uses \p E's own type as the source type.
static void DiagnoseImpCast(Sema &S, Expr *E, QualType T,
                            SourceLocation CContext,
                            unsigned diag, bool pruneControlFlow = false) {
  DiagnoseImpCast(S, E, E->getType(), T, CContext, diag, pruneControlFlow);
}
10888
/// Diagnose an implicit cast from a floating point value to an integer value.
///
/// Selects among several diagnostics depending on whether the source is a
/// literal, whether it is a compile-time constant, whether the conversion is
/// exact, and whether the result is out of the target type's range.
static void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T,
                                    SourceLocation CContext) {
  const bool IsBool = T->isSpecificBuiltinType(BuiltinType::Bool);
  // In a template instantiation, prune warnings that occur in unreachable
  // code by routing them through DiagRuntimeBehavior.
  const bool PruneWarnings = S.inTemplateInstantiation();

  Expr *InnerE = E->IgnoreParenImpCasts();
  // We also want to warn on, e.g., "int i = -1.234"
  if (UnaryOperator *UOp = dyn_cast<UnaryOperator>(InnerE))
    if (UOp->getOpcode() == UO_Minus || UOp->getOpcode() == UO_Plus)
      InnerE = UOp->getSubExpr()->IgnoreParenImpCasts();

  const bool IsLiteral =
      isa<FloatingLiteral>(E) || isa<FloatingLiteral>(InnerE);

  llvm::APFloat Value(0.0);
  bool IsConstant =
    E->EvaluateAsFloat(Value, S.Context, Expr::SE_AllowSideEffects);
  if (!IsConstant) {
    // Not a compile-time constant: only the generic warning applies.
    return DiagnoseImpCast(S, E, T, CContext,
                           diag::warn_impcast_float_integer, PruneWarnings);
  }

  bool isExact = false;

  llvm::APSInt IntegerValue(S.Context.getIntWidth(T),
                            T->hasUnsignedIntegerRepresentation());
  llvm::APFloat::opStatus Result = Value.convertToInteger(
      IntegerValue, llvm::APFloat::rmTowardZero, &isExact);

  // The conversion is exact and in range: literals are fine, otherwise emit
  // the generic warning.
  if (Result == llvm::APFloat::opOK && isExact) {
    if (IsLiteral) return;
    return DiagnoseImpCast(S, E, T, CContext, diag::warn_impcast_float_integer,
                           PruneWarnings);
  }

  // Conversion of a floating-point value to a non-bool integer where the
  // integral part cannot be represented by the integer type is undefined.
  if (!IsBool && Result == llvm::APFloat::opInvalidOp)
    return DiagnoseImpCast(
        S, E, T, CContext,
        IsLiteral ? diag::warn_impcast_literal_float_to_integer_out_of_range
                  : diag::warn_impcast_float_to_integer_out_of_range,
        PruneWarnings);

  unsigned DiagID = 0;
  if (IsLiteral) {
    // Warn on floating point literal to integer.
    DiagID = diag::warn_impcast_literal_float_to_integer;
  } else if (IntegerValue == 0) {
    if (Value.isZero()) {  // Skip -0.0 to 0 conversion.
      return DiagnoseImpCast(S, E, T, CContext,
                             diag::warn_impcast_float_integer, PruneWarnings);
    }
    // Warn on non-zero to zero conversion.
    DiagID = diag::warn_impcast_float_to_integer_zero;
  } else {
    if (IntegerValue.isUnsigned()) {
      if (!IntegerValue.isMaxValue()) {
        return DiagnoseImpCast(S, E, T, CContext,
                               diag::warn_impcast_float_integer, PruneWarnings);
      }
    } else {  // IntegerValue.isSigned()
      if (!IntegerValue.isMaxSignedValue() &&
          !IntegerValue.isMinSignedValue()) {
        return DiagnoseImpCast(S, E, T, CContext,
                               diag::warn_impcast_float_integer, PruneWarnings);
      }
    }
    // Warn on evaluatable floating point expression to integer conversion.
    DiagID = diag::warn_impcast_float_to_integer;
  }

  // FIXME: Force the precision of the source value down so we don't print
  // digits which are usually useless (we don't really care here if we
  // truncate a digit by accident in edge cases). Ideally, APFloat::toString
  // would automatically print the shortest representation, but it's a bit
  // tricky to implement.
  SmallString<16> PrettySourceValue;
  unsigned precision = llvm::APFloat::semanticsPrecision(Value.getSemantics());
  // 59/196 is a rational approximation of log10(2); this converts bits of
  // binary precision into approximately that many decimal digits.
  precision = (precision * 59 + 195) / 196;
  Value.toString(PrettySourceValue, precision);

  SmallString<16> PrettyTargetValue;
  if (IsBool)
    PrettyTargetValue = Value.isZero() ? "false" : "true";
  else
    IntegerValue.toString(PrettyTargetValue);

  if (PruneWarnings) {
    S.DiagRuntimeBehavior(E->getExprLoc(), E,
                          S.PDiag(DiagID)
                              << E->getType() << T.getUnqualifiedType()
                              << PrettySourceValue << PrettyTargetValue
                              << E->getSourceRange() << SourceRange(CContext));
  } else {
    S.Diag(E->getExprLoc(), DiagID)
        << E->getType() << T.getUnqualifiedType() << PrettySourceValue
        << PrettyTargetValue << E->getSourceRange() << SourceRange(CContext);
  }
}
10990
/// Analyze the given compound assignment for the possible losing of
/// floating-point precision.
static void AnalyzeCompoundAssignment(Sema &S, BinaryOperator *E) {
  assert(isa<CompoundAssignOperator>(E) &&
         "Must be compound assignment operation");
  // Recurse on the LHS and RHS in here
  AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());
  AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());

  // A compound assignment to an atomic l-value is implicitly seq_cst.
  if (E->getLHS()->getType()->isAtomicType())
    S.Diag(E->getOperatorLoc(), diag::warn_atomic_implicit_seq_cst);

  // Now check the outermost expression: compare the type the result is
  // stored into (the LHS type) against the type the computation is
  // performed in.
  const auto *ResultBT = E->getLHS()->getType()->getAs<BuiltinType>();
  const auto *RBT = cast<CompoundAssignOperator>(E)
                        ->getComputationResultType()
                        ->getAs<BuiltinType>();

  // The below checks assume source is floating point.
  if (!ResultBT || !RBT || !RBT->isFloatingPoint()) return;

  // If source is floating point but target is an integer.
  // Note: the source type reported is the RHS type; the target is the LHS.
  if (ResultBT->isInteger())
    return DiagnoseImpCast(S, E, E->getRHS()->getType(), E->getLHS()->getType(),
                           E->getExprLoc(), diag::warn_impcast_float_integer);

  if (!ResultBT->isFloatingPoint())
    return;

  // If both source and target are floating points, warn about losing precision.
  int Order = S.getASTContext().getFloatingTypeSemanticOrder(
      QualType(ResultBT, 0), QualType(RBT, 0));
  if (Order < 0 && !S.SourceMgr.isInSystemMacro(E->getOperatorLoc()))
    // warn about dropping FP rank.
    DiagnoseImpCast(S, E->getRHS(), E->getLHS()->getType(), E->getOperatorLoc(),
                    diag::warn_impcast_float_result_precision);
}
11028
11029static std::string PrettyPrintInRange(const llvm::APSInt &Value,
11030 IntRange Range) {
11031 if (!Range.Width) return "0";
11032
11033 llvm::APSInt ValueInRange = Value;
11034 ValueInRange.setIsSigned(!Range.NonNegative);
11035 ValueInRange = ValueInRange.trunc(Range.Width);
11036 return ValueInRange.toString(10);
11037}
11038
11039static bool IsImplicitBoolFloatConversion(Sema &S, Expr *Ex, bool ToBool) {
11040 if (!isa<ImplicitCastExpr>(Ex))
11041 return false;
11042
11043 Expr *InnerE = Ex->IgnoreParenImpCasts();
11044 const Type *Target = S.Context.getCanonicalType(Ex->getType()).getTypePtr();
11045 const Type *Source =
11046 S.Context.getCanonicalType(InnerE->getType()).getTypePtr();
11047 if (Target->isDependentType())
11048 return false;
11049
11050 const BuiltinType *FloatCandidateBT =
11051 dyn_cast<BuiltinType>(ToBool ? Source : Target);
11052 const Type *BoolCandidateType = ToBool ? Target : Source;
11053
11054 return (BoolCandidateType->isSpecificBuiltinType(BuiltinType::Bool) &&
11055 FloatCandidateBT && (FloatCandidateBT->isFloatingPoint()));
11056}
11057
/// Look through a call's arguments for a likely argument swap: an argument
/// implicitly converted float-to-bool sitting directly next to one converted
/// bool-to-float, e.g. passing (flag, value) where (value, flag) was meant.
static void CheckImplicitArgumentConversions(Sema &S, CallExpr *TheCall,
                                             SourceLocation CC) {
  unsigned NumArgs = TheCall->getNumArgs();
  for (unsigned i = 0; i < NumArgs; ++i) {
    Expr *CurrA = TheCall->getArg(i);
    // Only arguments converted float -> bool are interesting.
    if (!IsImplicitBoolFloatConversion(S, CurrA, true))
      continue;

    // Check whether an adjacent argument undergoes the opposite
    // (bool -> float) conversion. (The i > 0 / i < NumArgs-1 guards keep the
    // unsigned index arithmetic in range.)
    bool IsSwapped = ((i > 0) &&
        IsImplicitBoolFloatConversion(S, TheCall->getArg(i - 1), false));
    IsSwapped |= ((i < (NumArgs - 1)) &&
        IsImplicitBoolFloatConversion(S, TheCall->getArg(i + 1), false));
    if (IsSwapped) {
      // Warn on this floating-point to bool conversion.
      DiagnoseImpCast(S, CurrA->IgnoreParenImpCasts(),
                      CurrA->getType(), CC,
                      diag::warn_impcast_floating_point_to_bool);
    }
  }
}
11078
/// Diagnose an implicit conversion of NULL or nullptr to an integer/scalar
/// non-pointer type, attaching a fix-it that replaces the null constant with
/// the zero literal appropriate for the target type.
static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T,
                                   SourceLocation CC) {
  // Bail out early when the diagnostic is disabled here; the checks below
  // are not free.
  if (S.Diags.isIgnored(diag::warn_impcast_null_pointer_to_integer,
                        E->getExprLoc()))
    return;

  // Don't warn on functions which have return type nullptr_t.
  if (isa<CallExpr>(E))
    return;

  // Check for NULL (GNUNull) or nullptr (CXX11_nullptr).
  const Expr::NullPointerConstantKind NullKind =
      E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull);
  if (NullKind != Expr::NPCK_GNUNull && NullKind != Expr::NPCK_CXX11_nullptr)
    return;

  // Return if target type is a safe conversion.
  if (T->isAnyPointerType() || T->isBlockPointerType() ||
      T->isMemberPointerType() || !T->isScalarType() || T->isNullPtrType())
    return;

  SourceLocation Loc = E->getSourceRange().getBegin();

  // Venture through the macro stacks to get to the source of macro arguments.
  // The new location is a better location than the complete location that was
  // passed in.
  Loc = S.SourceMgr.getTopMacroCallerLoc(Loc);
  CC = S.SourceMgr.getTopMacroCallerLoc(CC);

  // __null is usually wrapped in a macro. Go up a macro if that is the case.
  if (NullKind == Expr::NPCK_GNUNull && Loc.isMacroID()) {
    StringRef MacroName = Lexer::getImmediateMacroNameForDiagnostics(
        Loc, S.SourceMgr, S.getLangOpts());
    if (MacroName == "NULL")
      Loc = S.SourceMgr.getImmediateExpansionRange(Loc).getBegin();
  }

  // Only warn if the null and context location are in the same macro expansion.
  if (S.SourceMgr.getFileID(Loc) != S.SourceMgr.getFileID(CC))
    return;

  S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer)
      << (NullKind == Expr::NPCK_CXX11_nullptr) << T << SourceRange(CC)
      << FixItHint::CreateReplacement(Loc,
                                      S.getFixItZeroLiteralForType(T, Loc));
}
11125
11126static void checkObjCArrayLiteral(Sema &S, QualType TargetType,
11127 ObjCArrayLiteral *ArrayLiteral);
11128
11129static void
11130checkObjCDictionaryLiteral(Sema &S, QualType TargetType,
11131 ObjCDictionaryLiteral *DictionaryLiteral);
11132
/// Check a single element within a collection literal against the
/// target element type.
///
/// \param ElementKind selects the diagnostic wording: 0 for an array
/// element, 1 for a dictionary key, 2 for a dictionary value (these values
/// are supplied by checkObjCArrayLiteral / checkObjCDictionaryLiteral).
static void checkObjCCollectionLiteralElement(Sema &S,
                                              QualType TargetElementType,
                                              Expr *Element,
                                              unsigned ElementKind) {
  // Skip a bitcast to 'id' or qualified 'id'.
  if (auto ICE = dyn_cast<ImplicitCastExpr>(Element)) {
    if (ICE->getCastKind() == CK_BitCast &&
        ICE->getSubExpr()->getType()->getAs<ObjCObjectPointerType>())
      Element = ICE->getSubExpr();
  }

  QualType ElementType = Element->getType();
  ExprResult ElementResult(Element);
  // Warn when an object-pointer element is not assignable to the target
  // element type.
  if (ElementType->getAs<ObjCObjectPointerType>() &&
      S.CheckSingleAssignmentConstraints(TargetElementType,
                                         ElementResult,
                                         false, false)
        != Sema::Compatible) {
    S.Diag(Element->getBeginLoc(), diag::warn_objc_collection_literal_element)
        << ElementType << ElementKind << TargetElementType
        << Element->getSourceRange();
  }

  // Recurse into nested collection literals.
  if (auto ArrayLiteral = dyn_cast<ObjCArrayLiteral>(Element))
    checkObjCArrayLiteral(S, TargetElementType, ArrayLiteral);
  else if (auto DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(Element))
    checkObjCDictionaryLiteral(S, TargetElementType, DictionaryLiteral);
}
11163
11164/// Check an Objective-C array literal being converted to the given
11165/// target type.
11166static void checkObjCArrayLiteral(Sema &S, QualType TargetType,
11167 ObjCArrayLiteral *ArrayLiteral) {
11168 if (!S.NSArrayDecl)
11169 return;
11170
11171 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>();
11172 if (!TargetObjCPtr)
11173 return;
11174
11175 if (TargetObjCPtr->isUnspecialized() ||
11176 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl()
11177 != S.NSArrayDecl->getCanonicalDecl())
11178 return;
11179
11180 auto TypeArgs = TargetObjCPtr->getTypeArgs();
11181 if (TypeArgs.size() != 1)
11182 return;
11183
11184 QualType TargetElementType = TypeArgs[0];
11185 for (unsigned I = 0, N = ArrayLiteral->getNumElements(); I != N; ++I) {
11186 checkObjCCollectionLiteralElement(S, TargetElementType,
11187 ArrayLiteral->getElement(I),
11188 0);
11189 }
11190}
11191
11192/// Check an Objective-C dictionary literal being converted to the given
11193/// target type.
11194static void
11195checkObjCDictionaryLiteral(Sema &S, QualType TargetType,
11196 ObjCDictionaryLiteral *DictionaryLiteral) {
11197 if (!S.NSDictionaryDecl)
11198 return;
11199
11200 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>();
11201 if (!TargetObjCPtr)
11202 return;
11203
11204 if (TargetObjCPtr->isUnspecialized() ||
11205 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl()
11206 != S.NSDictionaryDecl->getCanonicalDecl())
11207 return;
11208
11209 auto TypeArgs = TargetObjCPtr->getTypeArgs();
11210 if (TypeArgs.size() != 2)
11211 return;
11212
11213 QualType TargetKeyType = TypeArgs[0];
11214 QualType TargetObjectType = TypeArgs[1];
11215 for (unsigned I = 0, N = DictionaryLiteral->getNumElements(); I != N; ++I) {
11216 auto Element = DictionaryLiteral->getKeyValueElement(I);
11217 checkObjCCollectionLiteralElement(S, TargetKeyType, Element.Key, 1);
11218 checkObjCCollectionLiteralElement(S, TargetObjectType, Element.Value, 2);
11219 }
11220}
11221
11222// Helper function to filter out cases for constant width constant conversion.
11223// Don't warn on char array initialization or for non-decimal values.
11224static bool isSameWidthConstantConversion(Sema &S, Expr *E, QualType T,
11225 SourceLocation CC) {
11226 // If initializing from a constant, and the constant starts with '0',
11227 // then it is a binary, octal, or hexadecimal. Allow these constants
11228 // to fill all the bits, even if there is a sign change.
11229 if (auto *IntLit = dyn_cast<IntegerLiteral>(E->IgnoreParenImpCasts())) {
11230 const char FirstLiteralCharacter =
11231 S.getSourceManager().getCharacterData(IntLit->getBeginLoc())[0];
11232 if (FirstLiteralCharacter == '0')
11233 return false;
11234 }
11235
11236 // If the CC location points to a '{', and the type is char, then assume
11237 // assume it is an array initialization.
11238 if (CC.isValid() && T->isCharType()) {
11239 const char FirstContextCharacter =
11240 S.getSourceManager().getCharacterData(CC)[0];
11241 if (FirstContextCharacter == '{')
11242 return false;
11243 }
11244
11245 return true;
11246}
11247
/// Central dispatcher for diagnosing an implicit conversion of \p E to type
/// \p T at conversion-context location \p CC. Covers conversions to bool,
/// ObjC collection literals, vector/complex stripping, float precision,
/// fixed-point range, null-to-integer, integer precision/sign changes, and
/// enum-to-enum conversions. If \p ICContext is non-null, a signedness
/// warning sets *ICContext and uses the conditional-context diagnostic
/// instead (see CheckConditionalOperand).
static void
CheckImplicitConversion(Sema &S, Expr *E, QualType T, SourceLocation CC,
                        bool *ICContext = nullptr) {
  if (E->isTypeDependent() || E->isValueDependent()) return;

  const Type *Source = S.Context.getCanonicalType(E->getType()).getTypePtr();
  const Type *Target = S.Context.getCanonicalType(T).getTypePtr();
  if (Source == Target) return;
  if (Target->isDependentType()) return;

  // If the conversion context location is invalid don't complain. We also
  // don't want to emit a warning if the issue occurs from the expansion of
  // a system macro. The problem is that 'getSpellingLoc()' is slow, so we
  // delay this check as long as possible. Once we detect we are in that
  // scenario, we just return.
  if (CC.isInvalid())
    return;

  // Reading the source of an atomic conversion is implicitly seq_cst.
  if (Source->isAtomicType())
    S.Diag(E->getExprLoc(), diag::warn_atomic_implicit_seq_cst);

  // Diagnose implicit casts to bool.
  if (Target->isSpecificBuiltinType(BuiltinType::Bool)) {
    if (isa<StringLiteral>(E))
      // Warn on string literal to bool. Checks for string literals in logical
      // and expressions, for instance, assert(0 && "error here"), are
      // prevented by a check in AnalyzeImplicitConversions().
      return DiagnoseImpCast(S, E, T, CC,
                             diag::warn_impcast_string_literal_to_bool);
    if (isa<ObjCStringLiteral>(E) || isa<ObjCArrayLiteral>(E) ||
        isa<ObjCDictionaryLiteral>(E) || isa<ObjCBoxedExpr>(E)) {
      // This covers the literal expressions that evaluate to Objective-C
      // objects.
      return DiagnoseImpCast(S, E, T, CC,
                             diag::warn_impcast_objective_c_literal_to_bool);
    }
    if (Source->isPointerType() || Source->canDecayToPointerType()) {
      // Warn on pointer to bool conversion that is always true.
      S.DiagnoseAlwaysNonNullPointer(E, Expr::NPCK_NotNull, /*IsEqual*/ false,
                                     SourceRange(CC));
    }
  }

  // Check implicit casts from Objective-C collection literals to specialized
  // collection types, e.g., NSArray<NSString *> *.
  if (auto *ArrayLiteral = dyn_cast<ObjCArrayLiteral>(E))
    checkObjCArrayLiteral(S, QualType(Target, 0), ArrayLiteral);
  else if (auto *DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(E))
    checkObjCDictionaryLiteral(S, QualType(Target, 0), DictionaryLiteral);

  // Strip vector types so the element types are compared below.
  if (isa<VectorType>(Source)) {
    if (!isa<VectorType>(Target)) {
      if (S.SourceMgr.isInSystemMacro(CC))
        return;
      return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar);
    }

    // If the vector cast is cast between two vectors of the same size, it is
    // a bitcast, not a conversion.
    if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target))
      return;

    Source = cast<VectorType>(Source)->getElementType().getTypePtr();
    Target = cast<VectorType>(Target)->getElementType().getTypePtr();
  }
  if (auto VecTy = dyn_cast<VectorType>(Target))
    Target = VecTy->getElementType().getTypePtr();

  // Strip complex types.
  if (isa<ComplexType>(Source)) {
    if (!isa<ComplexType>(Target)) {
      if (S.SourceMgr.isInSystemMacro(CC) || Target->isBooleanType())
        return;

      return DiagnoseImpCast(S, E, T, CC,
                             S.getLangOpts().CPlusPlus
                                 ? diag::err_impcast_complex_scalar
                                 : diag::warn_impcast_complex_scalar);
    }

    Source = cast<ComplexType>(Source)->getElementType().getTypePtr();
    Target = cast<ComplexType>(Target)->getElementType().getTypePtr();
  }

  const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Source);
  const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Target);

  // If the source is floating point...
  if (SourceBT && SourceBT->isFloatingPoint()) {
    // ...and the target is floating point...
    if (TargetBT && TargetBT->isFloatingPoint()) {
      // ...then warn if we're dropping FP rank.

      int Order = S.getASTContext().getFloatingTypeSemanticOrder(
          QualType(SourceBT, 0), QualType(TargetBT, 0));
      if (Order > 0) {
        // Don't warn about float constants that are precisely
        // representable in the target type.
        Expr::EvalResult result;
        if (E->EvaluateAsRValue(result, S.Context)) {
          // Value might be a float, a float vector, or a float complex.
          if (IsSameFloatAfterCast(result.Val,
                   S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)),
                   S.Context.getFloatTypeSemantics(QualType(SourceBT, 0))))
            return;
        }

        if (S.SourceMgr.isInSystemMacro(CC))
          return;

        DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision);
      }
      // ... or possibly if we're increasing rank, too
      else if (Order < 0) {
        if (S.SourceMgr.isInSystemMacro(CC))
          return;

        DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_double_promotion);
      }
      return;
    }

    // If the target is integral, always warn.
    if (TargetBT && TargetBT->isInteger()) {
      if (S.SourceMgr.isInSystemMacro(CC))
        return;

      DiagnoseFloatingImpCast(S, E, T, CC);
    }

    // Detect the case where a call result is converted from floating-point to
    // to bool, and the final argument to the call is converted from bool, to
    // discover this typo:
    //
    //    bool b = fabs(x < 1.0);  // should be "bool b = fabs(x) < 1.0;"
    //
    // FIXME: This is an incredibly special case; is there some more general
    // way to detect this class of misplaced-parentheses bug?
    if (Target->isBooleanType() && isa<CallExpr>(E)) {
      // Check last argument of function call to see if it is an
      // implicit cast from a type matching the type the result
      // is being cast to.
      CallExpr *CEx = cast<CallExpr>(E);
      if (unsigned NumArgs = CEx->getNumArgs()) {
        Expr *LastA = CEx->getArg(NumArgs - 1);
        Expr *InnerE = LastA->IgnoreParenImpCasts();
        if (isa<ImplicitCastExpr>(LastA) &&
            InnerE->getType()->isBooleanType()) {
          // Warn on this floating-point to bool conversion
          DiagnoseImpCast(S, E, T, CC,
                          diag::warn_impcast_floating_point_to_bool);
        }
      }
    }
    return;
  }

  // Valid casts involving fixed point types should be accounted for here.
  // For each direction, constant-evaluate the operand and warn when the
  // value does not fit the destination's range.
  if (Source->isFixedPointType()) {
    if (Target->isUnsaturatedFixedPointType()) {
      Expr::EvalResult Result;
      if (E->EvaluateAsFixedPoint(Result, S.Context,
                                  Expr::SE_AllowSideEffects)) {
        APFixedPoint Value = Result.Val.getFixedPoint();
        APFixedPoint MaxVal = S.Context.getFixedPointMax(T);
        APFixedPoint MinVal = S.Context.getFixedPointMin(T);
        if (Value > MaxVal || Value < MinVal) {
          S.DiagRuntimeBehavior(E->getExprLoc(), E,
                                S.PDiag(diag::warn_impcast_fixed_point_range)
                                    << Value.toString() << T
                                    << E->getSourceRange()
                                    << clang::SourceRange(CC));
          return;
        }
      }
    } else if (Target->isIntegerType()) {
      Expr::EvalResult Result;
      if (E->EvaluateAsFixedPoint(Result, S.Context,
                                  Expr::SE_AllowSideEffects)) {
        APFixedPoint FXResult = Result.Val.getFixedPoint();

        bool Overflowed;
        llvm::APSInt IntResult = FXResult.convertToInt(
            S.Context.getIntWidth(T),
            Target->isSignedIntegerOrEnumerationType(), &Overflowed);

        if (Overflowed) {
          S.DiagRuntimeBehavior(E->getExprLoc(), E,
                                S.PDiag(diag::warn_impcast_fixed_point_range)
                                    << FXResult.toString() << T
                                    << E->getSourceRange()
                                    << clang::SourceRange(CC));
          return;
        }
      }
    }
  } else if (Target->isUnsaturatedFixedPointType()) {
    if (Source->isIntegerType()) {
      Expr::EvalResult Result;
      if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) {
        llvm::APSInt Value = Result.Val.getInt();

        bool Overflowed;
        APFixedPoint IntResult = APFixedPoint::getFromIntValue(
            Value, S.Context.getFixedPointSemantics(T), &Overflowed);

        if (Overflowed) {
          S.DiagRuntimeBehavior(E->getExprLoc(), E,
                                S.PDiag(diag::warn_impcast_fixed_point_range)
                                    << Value.toString(/*radix=*/10) << T
                                    << E->getSourceRange()
                                    << clang::SourceRange(CC));
          return;
        }
      }
    }
  }

  DiagnoseNullConversion(S, E, T, CC);

  S.DiscardMisalignedMemberAddress(Target, E);

  // Everything below concerns integer-to-integer conversions.
  if (!Source->isIntegerType() || !Target->isIntegerType())
    return;

  // TODO: remove this early return once the false positives for constant->bool
  // in templates, macros, etc, are reduced or removed.
  if (Target->isSpecificBuiltinType(BuiltinType::Bool))
    return;

  IntRange SourceRange = GetExprRange(S.Context, E);
  IntRange TargetRange = IntRange::forTargetOfCanonicalType(S.Context, Target);

  // Narrowing: the target cannot represent every source value.
  if (SourceRange.Width > TargetRange.Width) {
    // If the source is a constant, use a default-on diagnostic.
    // TODO: this should happen for bitfield stores, too.
    Expr::EvalResult Result;
    if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) {
      llvm::APSInt Value(32);
      Value = Result.Val.getInt();

      if (S.SourceMgr.isInSystemMacro(CC))
        return;

      std::string PrettySourceValue = Value.toString(10);
      std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange);

      S.DiagRuntimeBehavior(E->getExprLoc(), E,
        S.PDiag(diag::warn_impcast_integer_precision_constant)
            << PrettySourceValue << PrettyTargetValue
            << E->getType() << T << E->getSourceRange()
            << clang::SourceRange(CC));
      return;
    }

    // People want to build with -Wshorten-64-to-32 and not -Wconversion.
    if (S.SourceMgr.isInSystemMacro(CC))
      return;

    if (TargetRange.Width == 32 && S.Context.getIntWidth(E->getType()) == 64)
      return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_64_32,
                             /* pruneControlFlow */ true);
    return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision);
  }

  // Widening of a negated unsigned value: the high-order bits become zero
  // (unsigned target) or the result is suddenly non-negative (signed target).
  if (TargetRange.Width > SourceRange.Width) {
    if (auto *UO = dyn_cast<UnaryOperator>(E))
      if (UO->getOpcode() == UO_Minus)
        if (Source->isUnsignedIntegerType()) {
          if (Target->isUnsignedIntegerType())
            return DiagnoseImpCast(S, E, T, CC,
                                   diag::warn_impcast_high_order_zero_bits);
          if (Target->isSignedIntegerType())
            return DiagnoseImpCast(S, E, T, CC,
                                   diag::warn_impcast_nonnegative_result);
        }
  }

  if (TargetRange.Width == SourceRange.Width && !TargetRange.NonNegative &&
      SourceRange.NonNegative && Source->isSignedIntegerType()) {
    // Warn when doing a signed to signed conversion, warn if the positive
    // source value is exactly the width of the target type, which will
    // cause a negative value to be stored.

    Expr::EvalResult Result;
    if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects) &&
        !S.SourceMgr.isInSystemMacro(CC)) {
      llvm::APSInt Value = Result.Val.getInt();
      if (isSameWidthConstantConversion(S, E, T, CC)) {
        std::string PrettySourceValue = Value.toString(10);
        std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange);

        S.DiagRuntimeBehavior(
            E->getExprLoc(), E,
            S.PDiag(diag::warn_impcast_integer_precision_constant)
                << PrettySourceValue << PrettyTargetValue << E->getType() << T
                << E->getSourceRange() << clang::SourceRange(CC));
        return;
      }
    }

    // Fall through for non-constants to give a sign conversion warning.
  }

  if ((TargetRange.NonNegative && !SourceRange.NonNegative) ||
      (!TargetRange.NonNegative && SourceRange.NonNegative &&
       SourceRange.Width == TargetRange.Width)) {
    if (S.SourceMgr.isInSystemMacro(CC))
      return;

    unsigned DiagID = diag::warn_impcast_integer_sign;

    // Traditionally, gcc has warned about this under -Wsign-compare.
    // We also want to warn about it in -Wconversion.
    // So if -Wconversion is off, use a completely identical diagnostic
    // in the sign-compare group.
    // The conditional-checking code will
    if (ICContext) {
      DiagID = diag::warn_impcast_integer_sign_conditional;
      *ICContext = true;
    }

    return DiagnoseImpCast(S, E, T, CC, DiagID);
  }

  // Diagnose conversions between different enumeration types.
  // In C, we pretend that the type of an EnumConstantDecl is its enumeration
  // type, to give us better diagnostics.
  QualType SourceType = E->getType();
  if (!S.getLangOpts().CPlusPlus) {
    if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
      if (EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(DRE->getDecl())) {
        EnumDecl *Enum = cast<EnumDecl>(ECD->getDeclContext());
        SourceType = S.Context.getTypeDeclType(Enum);
        Source = S.Context.getCanonicalType(SourceType).getTypePtr();
      }
  }

  if (const EnumType *SourceEnum = Source->getAs<EnumType>())
    if (const EnumType *TargetEnum = Target->getAs<EnumType>())
      if (SourceEnum->getDecl()->hasNameForLinkage() &&
          TargetEnum->getDecl()->hasNameForLinkage() &&
          SourceEnum != TargetEnum) {
        if (S.SourceMgr.isInSystemMacro(CC))
          return;

        return DiagnoseImpCast(S, E, SourceType, T, CC,
                               diag::warn_impcast_different_enum_types);
      }
}
11599
11600static void CheckConditionalOperator(Sema &S, ConditionalOperator *E,
11601 SourceLocation CC, QualType T);
11602
11603static void CheckConditionalOperand(Sema &S, Expr *E, QualType T,
11604 SourceLocation CC, bool &ICContext) {
11605 E = E->IgnoreParenImpCasts();
11606
11607 if (isa<ConditionalOperator>(E))
11608 return CheckConditionalOperator(S, cast<ConditionalOperator>(E), CC, T);
11609
11610 AnalyzeImplicitConversions(S, E, CC);
11611 if (E->getType() != T)
11612 return CheckImplicitConversion(S, E, T, CC, &ICContext);
11613}
11614
/// Analyze a conditional operator feeding a context of type \p T: check the
/// condition and both arms, and if a signedness-conversion warning was
/// suppressed (because -Wconversion's conditional variant is off), re-check
/// the arms against the conditional's own type.
static void CheckConditionalOperator(Sema &S, ConditionalOperator *E,
                                     SourceLocation CC, QualType T) {
  AnalyzeImplicitConversions(S, E->getCond(), E->getQuestionLoc());

  // Suspicious becomes true if either arm would draw a signedness warning
  // against the context type.
  bool Suspicious = false;
  CheckConditionalOperand(S, E->getTrueExpr(), T, CC, Suspicious);
  CheckConditionalOperand(S, E->getFalseExpr(), T, CC, Suspicious);

  // If -Wconversion would have warned about either of the candidates
  // for a signedness conversion to the context type...
  if (!Suspicious) return;

  // ...but it's currently ignored...
  if (!S.Diags.isIgnored(diag::warn_impcast_integer_sign_conditional, CC))
    return;

  // ...then check whether it would have warned about either of the
  // candidates for a signedness conversion to the condition type.
  if (E->getType() == T) return;

  Suspicious = false;
  CheckImplicitConversion(S, E->getTrueExpr()->IgnoreParenImpCasts(),
                          E->getType(), CC, &Suspicious);
  if (!Suspicious)
    CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(),
                            E->getType(), CC, &Suspicious);
}
11642
11643/// Check conversion of given expression to boolean.
11644/// Input argument E is a logical expression.
11645static void CheckBoolLikeConversion(Sema &S, Expr *E, SourceLocation CC) {
11646 if (S.getLangOpts().Bool)
11647 return;
11648 if (E->IgnoreParenImpCasts()->getType()->isAtomicType())
11649 return;
11650 CheckImplicitConversion(S, E->IgnoreParenImpCasts(), S.Context.BoolTy, CC);
11651}
11652
/// AnalyzeImplicitConversions - Find and report any interesting
/// implicit conversions in the given expression. There are a couple
/// of competing diagnostics here, -Wconversion and -Wsign-compare.
///
/// \param S the Sema instance to issue diagnostics through.
/// \param OrigE the expression to analyze; the analysis recurses into its
///   subexpressions.
/// \param CC the "context" location requiring the implicit conversion.
static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE,
                                       SourceLocation CC) {
  QualType T = OrigE->getType();
  Expr *E = OrigE->IgnoreParenImpCasts();

  // Dependent expressions cannot be analyzed until instantiation.
  if (E->isTypeDependent() || E->isValueDependent())
    return;

  // For conditional operators, we analyze the arguments as if they
  // were being fed directly into the output.
  if (isa<ConditionalOperator>(E)) {
    ConditionalOperator *CO = cast<ConditionalOperator>(E);
    CheckConditionalOperator(S, CO, CC, T);
    return;
  }

  // Check implicit argument conversions for function calls.
  if (CallExpr *Call = dyn_cast<CallExpr>(E))
    CheckImplicitArgumentConversions(S, Call, CC);

  // Go ahead and check any implicit conversions we might have skipped.
  // The non-canonical typecheck is just an optimization;
  // CheckImplicitConversion will filter out dead implicit conversions.
  if (E->getType() != T)
    CheckImplicitConversion(S, E, T, CC);

  // Now continue drilling into this expression.

  if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) {
    // The bound subexpressions in a PseudoObjectExpr are not reachable
    // as transitive children.
    // FIXME: Use a more uniform representation for this.
    for (auto *SE : POE->semantics())
      if (auto *OVE = dyn_cast<OpaqueValueExpr>(SE))
        AnalyzeImplicitConversions(S, OVE->getSourceExpr(), CC);
  }

  // Skip past explicit casts.
  if (auto *CE = dyn_cast<ExplicitCastExpr>(E)) {
    E = CE->getSubExpr()->IgnoreParenImpCasts();
    // Casting an atomic operand to a non-void type implies a
    // sequentially-consistent load; warn about it.
    if (!CE->getType()->isVoidType() && E->getType()->isAtomicType())
      S.Diag(E->getBeginLoc(), diag::warn_atomic_implicit_seq_cst);
    return AnalyzeImplicitConversions(S, E, CC);
  }

  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
    // Do a somewhat different check with comparison operators.
    if (BO->isComparisonOp())
      return AnalyzeComparison(S, BO);

    // And with simple assignments.
    if (BO->getOpcode() == BO_Assign)
      return AnalyzeAssignment(S, BO);
    // And with compound assignments.
    if (BO->isAssignmentOp())
      return AnalyzeCompoundAssignment(S, BO);
  }

  // These break the otherwise-useful invariant below. Fortunately,
  // we don't really need to recurse into them, because any internal
  // expressions should have been analyzed already when they were
  // built into statements.
  if (isa<StmtExpr>(E)) return;

  // Don't descend into unevaluated contexts.
  if (isa<UnaryExprOrTypeTraitExpr>(E)) return;

  // Now just recurse over the expression's children.
  CC = E->getExprLoc();
  BinaryOperator *BO = dyn_cast<BinaryOperator>(E);
  bool IsLogicalAndOperator = BO && BO->getOpcode() == BO_LAnd;
  for (Stmt *SubStmt : E->children()) {
    Expr *ChildExpr = dyn_cast_or_null<Expr>(SubStmt);
    if (!ChildExpr)
      continue;

    if (IsLogicalAndOperator &&
        isa<StringLiteral>(ChildExpr->IgnoreParenImpCasts()))
      // Ignore checking string literals that are in logical and operators.
      // This is a common pattern for asserts.
      continue;
    AnalyzeImplicitConversions(S, ChildExpr, CC);
  }

  // Logical operators convert their operands to boolean; check those
  // conversions too (skipping assert-style "cond && \"msg\"" literals).
  if (BO && BO->isLogicalOp()) {
    Expr *SubExpr = BO->getLHS()->IgnoreParenImpCasts();
    if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr))
      ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc());

    SubExpr = BO->getRHS()->IgnoreParenImpCasts();
    if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr))
      ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc());
  }

  if (const UnaryOperator *U = dyn_cast<UnaryOperator>(E)) {
    if (U->getOpcode() == UO_LNot) {
      // '!' converts its operand to boolean.
      ::CheckBoolLikeConversion(S, U->getSubExpr(), CC);
    } else if (U->getOpcode() != UO_AddrOf) {
      // Any other unary use of an atomic operand (besides taking its
      // address) implies a sequentially-consistent access.
      if (U->getSubExpr()->getType()->isAtomicType())
        S.Diag(U->getSubExpr()->getBeginLoc(),
               diag::warn_atomic_implicit_seq_cst);
    }
  }
}
11760
11761/// Diagnose integer type and any valid implicit conversion to it.
11762static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, const QualType &IntT) {
11763 // Taking into account implicit conversions,
11764 // allow any integer.
11765 if (!E->getType()->isIntegerType()) {
11766 S.Diag(E->getBeginLoc(),
11767 diag::err_opencl_enqueue_kernel_invalid_local_size_type);
11768 return true;
11769 }
11770 // Potentially emit standard warnings for implicit conversions if enabled
11771 // using -Wconversion.
11772 CheckImplicitConversion(S, E, IntT, E->getBeginLoc());
11773 return false;
11774}
11775
11776// Helper function for Sema::DiagnoseAlwaysNonNullPointer.
11777// Returns true when emitting a warning about taking the address of a reference.
11778static bool CheckForReference(Sema &SemaRef, const Expr *E,
11779 const PartialDiagnostic &PD) {
11780 E = E->IgnoreParenImpCasts();
11781
11782 const FunctionDecl *FD = nullptr;
11783
11784 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
11785 if (!DRE->getDecl()->getType()->isReferenceType())
11786 return false;
11787 } else if (const MemberExpr *M = dyn_cast<MemberExpr>(E)) {
11788 if (!M->getMemberDecl()->getType()->isReferenceType())
11789 return false;
11790 } else if (const CallExpr *Call = dyn_cast<CallExpr>(E)) {
11791 if (!Call->getCallReturnType(SemaRef.Context)->isReferenceType())
11792 return false;
11793 FD = Call->getDirectCallee();
11794 } else {
11795 return false;
11796 }
11797
11798 SemaRef.Diag(E->getExprLoc(), PD);
11799
11800 // If possible, point to location of function.
11801 if (FD) {
11802 SemaRef.Diag(FD->getLocation(), diag::note_reference_is_return_value) << FD;
11803 }
11804
11805 return true;
11806}
11807
11808// Returns true if the SourceLocation is expanded from any macro body.
11809// Returns false if the SourceLocation is invalid, is from not in a macro
11810// expansion, or is from expanded from a top-level macro argument.
11811static bool IsInAnyMacroBody(const SourceManager &SM, SourceLocation Loc) {
11812 if (Loc.isInvalid())
11813 return false;
11814
11815 while (Loc.isMacroID()) {
11816 if (SM.isMacroBodyExpansion(Loc))
11817 return true;
11818 Loc = SM.getImmediateMacroCallerLoc(Loc);
11819 }
11820
11821 return false;
11822}
11823
/// Diagnose pointers that are always non-null.
/// \param E the expression containing the pointer
/// \param NullKind NPCK_NotNull if E is a cast to bool, otherwise, E is
/// compared to a null pointer
/// \param IsEqual True when the comparison is equal to a null pointer
/// \param Range Extra SourceRange to highlight in the diagnostic
void Sema::DiagnoseAlwaysNonNullPointer(Expr *E,
                                        Expr::NullPointerConstantKind NullKind,
                                        bool IsEqual, SourceRange Range) {
  if (!E)
    return;

  // Don't warn inside macros.
  if (E->getExprLoc().isMacroID()) {
    const SourceManager &SM = getSourceManager();
    if (IsInAnyMacroBody(SM, E->getExprLoc()) ||
        IsInAnyMacroBody(SM, Range.getBegin()))
      return;
  }
  E = E->IgnoreImpCasts();

  const bool IsCompare = NullKind != Expr::NPCK_NotNull;

  // 'this' is never null, so comparing or converting it is always suspicious.
  if (isa<CXXThisExpr>(E)) {
    unsigned DiagID = IsCompare ? diag::warn_this_null_compare
                                : diag::warn_this_bool_conversion;
    Diag(E->getExprLoc(), DiagID) << E->getSourceRange() << Range << IsEqual;
    return;
  }

  bool IsAddressOf = false;

  // Strip a top-level '&', remembering that we saw it; any other unary
  // operator is not interesting here.
  if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
    if (UO->getOpcode() != UO_AddrOf)
      return;
    IsAddressOf = true;
    E = UO->getSubExpr();
  }

  // Taking the address of a reference can never yield null.
  if (IsAddressOf) {
    unsigned DiagID = IsCompare
        ? diag::warn_address_of_reference_null_compare
        : diag::warn_address_of_reference_bool_conversion;
    PartialDiagnostic PD = PDiag(DiagID) << E->getSourceRange() << Range
                                         << IsEqual;
    if (CheckForReference(*this, E, PD)) {
      return;
    }
  }

  // Emit the diagnostic (plus a note pointing at the attribute) for an
  // expression known non-null via a nonnull/returns_nonnull attribute.
  auto ComplainAboutNonnullParamOrCall = [&](const Attr *NonnullAttr) {
    bool IsParam = isa<NonNullAttr>(NonnullAttr);
    std::string Str;
    llvm::raw_string_ostream S(Str);
    E->printPretty(S, nullptr, getPrintingPolicy());
    unsigned DiagID = IsCompare ? diag::warn_nonnull_expr_compare
                                : diag::warn_cast_nonnull_to_bool;
    Diag(E->getExprLoc(), DiagID) << IsParam << S.str()
      << E->getSourceRange() << Range << IsEqual;
    Diag(NonnullAttr->getLocation(), diag::note_declared_nonnull) << IsParam;
  };

  // If we have a CallExpr that is tagged with returns_nonnull, we can complain.
  if (auto *Call = dyn_cast<CallExpr>(E->IgnoreParenImpCasts())) {
    if (auto *Callee = Call->getDirectCallee()) {
      if (const Attr *A = Callee->getAttr<ReturnsNonNullAttr>()) {
        ComplainAboutNonnullParamOrCall(A);
        return;
      }
    }
  }

  // Expect to find a single Decl. Skip anything more complicated.
  ValueDecl *D = nullptr;
  if (DeclRefExpr *R = dyn_cast<DeclRefExpr>(E)) {
    D = R->getDecl();
  } else if (MemberExpr *M = dyn_cast<MemberExpr>(E)) {
    D = M->getMemberDecl();
  }

  // Weak Decls can be null.
  if (!D || D->isWeak())
    return;

  // Check for parameter decl with nonnull attribute
  if (const auto* PV = dyn_cast<ParmVarDecl>(D)) {
    // Only warn when the parameter has not been modified within the current
    // function (a modified parameter could legitimately be null).
    if (getCurFunction() &&
        !getCurFunction()->ModifiedNonNullParams.count(PV)) {
      // The attribute may be written directly on the parameter...
      if (const Attr *A = PV->getAttr<NonNullAttr>()) {
        ComplainAboutNonnullParamOrCall(A);
        return;
      }

      // ...or on the enclosing function, naming this parameter's index.
      if (const auto *FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) {
        // Skip function template not specialized yet.
        if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
          return;
        auto ParamIter = llvm::find(FD->parameters(), PV);
        assert(ParamIter != FD->param_end());
        unsigned ParamNo = std::distance(FD->param_begin(), ParamIter);

        for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) {
          // An argument-less nonnull attribute applies to every parameter.
          if (!NonNull->args_size()) {
            ComplainAboutNonnullParamOrCall(NonNull);
            return;
          }

          for (const ParamIdx &ArgNo : NonNull->args()) {
            if (ArgNo.getASTIndex() == ParamNo) {
              ComplainAboutNonnullParamOrCall(NonNull);
              return;
            }
          }
        }
      }
    }
  }

  QualType T = D->getType();
  const bool IsArray = T->isArrayType();
  const bool IsFunction = T->isFunctionType();

  // Address of function is used to silence the function warning.
  if (IsAddressOf && IsFunction) {
    return;
  }

  // Found nothing.
  if (!IsAddressOf && !IsFunction && !IsArray)
    return;

  // Pretty print the expression for the diagnostic.
  std::string Str;
  llvm::raw_string_ostream S(Str);
  E->printPretty(S, nullptr, getPrintingPolicy());

  unsigned DiagID = IsCompare ? diag::warn_null_pointer_compare
                              : diag::warn_impcast_pointer_to_bool;
  // Select which flavor of always-non-null entity we are diagnosing.
  enum {
    AddressOf,
    FunctionPointer,
    ArrayPointer
  } DiagType;
  if (IsAddressOf)
    DiagType = AddressOf;
  else if (IsFunction)
    DiagType = FunctionPointer;
  else if (IsArray)
    DiagType = ArrayPointer;
  else
    llvm_unreachable("Could not determine diagnostic.");
  Diag(E->getExprLoc(), DiagID) << DiagType << S.str() << E->getSourceRange()
                                << Range << IsEqual;

  // Only function-typed entities get the fix-it notes below.
  if (!IsFunction)
    return;

  // Suggest '&' to silence the function warning.
  Diag(E->getExprLoc(), diag::note_function_warning_silence)
      << FixItHint::CreateInsertion(E->getBeginLoc(), "&");

  // Check to see if '()' fixit should be emitted.
  QualType ReturnType;
  UnresolvedSet<4> NonTemplateOverloads;
  tryExprAsCall(*E, ReturnType, NonTemplateOverloads);
  if (ReturnType.isNull())
    return;

  if (IsCompare) {
    // There are two cases here. If the null is a null pointer constant,
    // only suggest the call for a pointer return type. If the null is 0,
    // suggest it when the return type is a pointer or an integer type.
    if (!ReturnType->isPointerType()) {
      if (NullKind == Expr::NPCK_ZeroExpression ||
          NullKind == Expr::NPCK_ZeroLiteral) {
        if (!ReturnType->isIntegerType())
          return;
      } else {
        return;
      }
    }
  } else { // !IsCompare
    // For function to bool, only suggest if the function pointer has bool
    // return type.
    if (!ReturnType->isSpecificBuiltinType(BuiltinType::Bool))
      return;
  }
  Diag(E->getExprLoc(), diag::note_function_to_function_call)
      << FixItHint::CreateInsertion(getLocForEndOfToken(E->getEndLoc()), "()");
}
12014
12015/// Diagnoses "dangerous" implicit conversions within the given
12016/// expression (which is a full expression). Implements -Wconversion
12017/// and -Wsign-compare.
12018///
12019/// \param CC the "context" location of the implicit conversion, i.e.
12020/// the most location of the syntactic entity requiring the implicit
12021/// conversion
12022void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) {
12023 // Don't diagnose in unevaluated contexts.
12024 if (isUnevaluatedContext())
12025 return;
12026
12027 // Don't diagnose for value- or type-dependent expressions.
12028 if (E->isTypeDependent() || E->isValueDependent())
12029 return;
12030
12031 // Check for array bounds violations in cases where the check isn't triggered
12032 // elsewhere for other Expr types (like BinaryOperators), e.g. when an
12033 // ArraySubscriptExpr is on the RHS of a variable initialization.
12034 CheckArrayAccess(E);
12035
12036 // This is not the right CC for (e.g.) a variable initialization.
12037 AnalyzeImplicitConversions(*this, E, CC);
12038}
12039
/// CheckBoolLikeConversion - Check conversion of given expression to boolean.
/// Input argument E is a logical expression.
///
/// Public wrapper around the file-local CheckBoolLikeConversion helper.
/// \param E the logical expression being converted.
/// \param CC the location of the context requiring the conversion.
void Sema::CheckBoolLikeConversion(Expr *E, SourceLocation CC) {
  ::CheckBoolLikeConversion(*this, E, CC);
}
12045
/// Diagnose when expression is an integer constant expression and its
/// evaluation results in integer overflow.
///
/// A work list is used so that operands buried in nested initializer
/// lists, call arguments, and Objective-C message arguments are all
/// visited without deep recursion.
void Sema::CheckForIntOverflow (Expr *E) {
  // Use a work list to deal with nested struct initializers.
  SmallVector<Expr *, 2> Exprs(1, E);

  do {
    Expr *OriginalE = Exprs.pop_back_val();
    // Strip parens and casts to get at the interesting expression.
    Expr *E = OriginalE->IgnoreParenCasts();

    // Binary operators are where overflowing arithmetic actually happens;
    // evaluate them specifically looking for overflow.
    if (isa<BinaryOperator>(E)) {
      E->EvaluateForOverflow(Context);
      continue;
    }

    // Otherwise queue up subexpressions that may themselves contain
    // overflowing arithmetic. Note that init lists and boxed expressions
    // are matched against the unstripped expression.
    if (auto InitList = dyn_cast<InitListExpr>(OriginalE))
      Exprs.append(InitList->inits().begin(), InitList->inits().end());
    else if (isa<ObjCBoxedExpr>(OriginalE))
      E->EvaluateForOverflow(Context);
    else if (auto Call = dyn_cast<CallExpr>(E))
      Exprs.append(Call->arg_begin(), Call->arg_end());
    else if (auto Message = dyn_cast<ObjCMessageExpr>(E))
      Exprs.append(Message->arg_begin(), Message->arg_end());
  } while (!Exprs.empty());
}
12071
12072namespace {
12073
12074/// Visitor for expressions which looks for unsequenced operations on the
12075/// same object.
12076class SequenceChecker : public EvaluatedExprVisitor<SequenceChecker> {
12077 using Base = EvaluatedExprVisitor<SequenceChecker>;
12078
12079 /// A tree of sequenced regions within an expression. Two regions are
12080 /// unsequenced if one is an ancestor or a descendent of the other. When we
12081 /// finish processing an expression with sequencing, such as a comma
12082 /// expression, we fold its tree nodes into its parent, since they are
12083 /// unsequenced with respect to nodes we will visit later.
12084 class SequenceTree {
12085 struct Value {
12086 explicit Value(unsigned Parent) : Parent(Parent), Merged(false) {}
12087 unsigned Parent : 31;
12088 unsigned Merged : 1;
12089 };
12090 SmallVector<Value, 8> Values;
12091
12092 public:
12093 /// A region within an expression which may be sequenced with respect
12094 /// to some other region.
12095 class Seq {
12096 friend class SequenceTree;
12097
12098 unsigned Index;
12099
12100 explicit Seq(unsigned N) : Index(N) {}
12101
12102 public:
12103 Seq() : Index(0) {}
12104 };
12105
12106 SequenceTree() { Values.push_back(Value(0)); }
12107 Seq root() const { return Seq(0); }
12108
12109 /// Create a new sequence of operations, which is an unsequenced
12110 /// subset of \p Parent. This sequence of operations is sequenced with
12111 /// respect to other children of \p Parent.
12112 Seq allocate(Seq Parent) {
12113 Values.push_back(Value(Parent.Index));
12114 return Seq(Values.size() - 1);
12115 }
12116
12117 /// Merge a sequence of operations into its parent.
12118 void merge(Seq S) {
12119 Values[S.Index].Merged = true;
12120 }
12121
12122 /// Determine whether two operations are unsequenced. This operation
12123 /// is asymmetric: \p Cur should be the more recent sequence, and \p Old
12124 /// should have been merged into its parent as appropriate.
12125 bool isUnsequenced(Seq Cur, Seq Old) {
12126 unsigned C = representative(Cur.Index);
12127 unsigned Target = representative(Old.Index);
12128 while (C >= Target) {
12129 if (C == Target)
12130 return true;
12131 C = Values[C].Parent;
12132 }
12133 return false;
12134 }
12135
12136 private:
12137 /// Pick a representative for a sequence.
12138 unsigned representative(unsigned K) {
12139 if (Values[K].Merged)
12140 // Perform path compression as we go.
12141 return Values[K].Parent = representative(Values[K].Parent);
12142 return K;
12143 }
12144 };
12145
12146 /// An object for which we can track unsequenced uses.
12147 using Object = NamedDecl *;
12148
12149 /// Different flavors of object usage which we track. We only track the
12150 /// least-sequenced usage of each kind.
12151 enum UsageKind {
12152 /// A read of an object. Multiple unsequenced reads are OK.
12153 UK_Use,
12154
12155 /// A modification of an object which is sequenced before the value
12156 /// computation of the expression, such as ++n in C++.
12157 UK_ModAsValue,
12158
12159 /// A modification of an object which is not sequenced before the value
12160 /// computation of the expression, such as n++.
12161 UK_ModAsSideEffect,
12162
12163 UK_Count = UK_ModAsSideEffect + 1
12164 };
12165
12166 struct Usage {
12167 Expr *Use;
12168 SequenceTree::Seq Seq;
12169
12170 Usage() : Use(nullptr), Seq() {}
12171 };
12172
12173 struct UsageInfo {
12174 Usage Uses[UK_Count];
12175
12176 /// Have we issued a diagnostic for this variable already?
12177 bool Diagnosed;
12178
12179 UsageInfo() : Uses(), Diagnosed(false) {}
12180 };
12181 using UsageInfoMap = llvm::SmallDenseMap<Object, UsageInfo, 16>;
12182
12183 Sema &SemaRef;
12184
12185 /// Sequenced regions within the expression.
12186 SequenceTree Tree;
12187
12188 /// Declaration modifications and references which we have seen.
12189 UsageInfoMap UsageMap;
12190
12191 /// The region we are currently within.
12192 SequenceTree::Seq Region;
12193
12194 /// Filled in with declarations which were modified as a side-effect
12195 /// (that is, post-increment operations).
12196 SmallVectorImpl<std::pair<Object, Usage>> *ModAsSideEffect = nullptr;
12197
12198 /// Expressions to check later. We defer checking these to reduce
12199 /// stack usage.
12200 SmallVectorImpl<Expr *> &WorkList;
12201
12202 /// RAII object wrapping the visitation of a sequenced subexpression of an
12203 /// expression. At the end of this process, the side-effects of the evaluation
12204 /// become sequenced with respect to the value computation of the result, so
12205 /// we downgrade any UK_ModAsSideEffect within the evaluation to
12206 /// UK_ModAsValue.
12207 struct SequencedSubexpression {
12208 SequencedSubexpression(SequenceChecker &Self)
12209 : Self(Self), OldModAsSideEffect(Self.ModAsSideEffect) {
12210 Self.ModAsSideEffect = &ModAsSideEffect;
12211 }
12212
12213 ~SequencedSubexpression() {
12214 for (auto &M : llvm::reverse(ModAsSideEffect)) {
12215 UsageInfo &U = Self.UsageMap[M.first];
12216 auto &SideEffectUsage = U.Uses[UK_ModAsSideEffect];
12217 Self.addUsage(U, M.first, SideEffectUsage.Use, UK_ModAsValue);
12218 SideEffectUsage = M.second;
12219 }
12220 Self.ModAsSideEffect = OldModAsSideEffect;
12221 }
12222
12223 SequenceChecker &Self;
12224 SmallVector<std::pair<Object, Usage>, 4> ModAsSideEffect;
12225 SmallVectorImpl<std::pair<Object, Usage>> *OldModAsSideEffect;
12226 };
12227
12228 /// RAII object wrapping the visitation of a subexpression which we might
12229 /// choose to evaluate as a constant. If any subexpression is evaluated and
12230 /// found to be non-constant, this allows us to suppress the evaluation of
12231 /// the outer expression.
12232 class EvaluationTracker {
12233 public:
12234 EvaluationTracker(SequenceChecker &Self)
12235 : Self(Self), Prev(Self.EvalTracker) {
12236 Self.EvalTracker = this;
12237 }
12238
12239 ~EvaluationTracker() {
12240 Self.EvalTracker = Prev;
12241 if (Prev)
12242 Prev->EvalOK &= EvalOK;
12243 }
12244
12245 bool evaluate(const Expr *E, bool &Result) {
12246 if (!EvalOK || E->isValueDependent())
12247 return false;
12248 EvalOK = E->EvaluateAsBooleanCondition(Result, Self.SemaRef.Context);
12249 return EvalOK;
12250 }
12251
12252 private:
12253 SequenceChecker &Self;
12254 EvaluationTracker *Prev;
12255 bool EvalOK = true;
12256 } *EvalTracker = nullptr;
12257
12258 /// Find the object which is produced by the specified expression,
12259 /// if any.
12260 Object getObject(Expr *E, bool Mod) const {
12261 E = E->IgnoreParenCasts();
12262 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
12263 if (Mod && (UO->getOpcode() == UO_PreInc || UO->getOpcode() == UO_PreDec))
12264 return getObject(UO->getSubExpr(), Mod);
12265 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
12266 if (BO->getOpcode() == BO_Comma)
12267 return getObject(BO->getRHS(), Mod);
12268 if (Mod && BO->isAssignmentOp())
12269 return getObject(BO->getLHS(), Mod);
12270 } else if (MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
12271 // FIXME: Check for more interesting cases, like "x.n = ++x.n".
12272 if (isa<CXXThisExpr>(ME->getBase()->IgnoreParenCasts()))
12273 return ME->getMemberDecl();
12274 } else if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
12275 // FIXME: If this is a reference, map through to its value.
12276 return DRE->getDecl();
12277 return nullptr;
12278 }
12279
12280 /// Note that an object was modified or used by an expression.
12281 void addUsage(UsageInfo &UI, Object O, Expr *Ref, UsageKind UK) {
12282 Usage &U = UI.Uses[UK];
12283 if (!U.Use || !Tree.isUnsequenced(Region, U.Seq)) {
12284 if (UK == UK_ModAsSideEffect && ModAsSideEffect)
12285 ModAsSideEffect->push_back(std::make_pair(O, U));
12286 U.Use = Ref;
12287 U.Seq = Region;
12288 }
12289 }
12290
12291 /// Check whether a modification or use conflicts with a prior usage.
12292 void checkUsage(Object O, UsageInfo &UI, Expr *Ref, UsageKind OtherKind,
12293 bool IsModMod) {
12294 if (UI.Diagnosed)
12295 return;
12296
12297 const Usage &U = UI.Uses[OtherKind];
12298 if (!U.Use || !Tree.isUnsequenced(Region, U.Seq))
12299 return;
12300
12301 Expr *Mod = U.Use;
12302 Expr *ModOrUse = Ref;
12303 if (OtherKind == UK_Use)
12304 std::swap(Mod, ModOrUse);
12305
12306 SemaRef.DiagRuntimeBehavior(
12307 Mod->getExprLoc(), {Mod, ModOrUse},
12308 SemaRef.PDiag(IsModMod ? diag::warn_unsequenced_mod_mod
12309 : diag::warn_unsequenced_mod_use)
12310 << O << SourceRange(ModOrUse->getExprLoc()));
12311 UI.Diagnosed = true;
12312 }
12313
12314 void notePreUse(Object O, Expr *Use) {
12315 UsageInfo &U = UsageMap[O];
12316 // Uses conflict with other modifications.
12317 checkUsage(O, U, Use, UK_ModAsValue, false);
12318 }
12319
12320 void notePostUse(Object O, Expr *Use) {
12321 UsageInfo &U = UsageMap[O];
12322 checkUsage(O, U, Use, UK_ModAsSideEffect, false);
12323 addUsage(U, O, Use, UK_Use);
12324 }
12325
12326 void notePreMod(Object O, Expr *Mod) {
12327 UsageInfo &U = UsageMap[O];
12328 // Modifications conflict with other modifications and with uses.
12329 checkUsage(O, U, Mod, UK_ModAsValue, true);
12330 checkUsage(O, U, Mod, UK_Use, false);
12331 }
12332
12333 void notePostMod(Object O, Expr *Use, UsageKind UK) {
12334 UsageInfo &U = UsageMap[O];
12335 checkUsage(O, U, Use, UK_ModAsSideEffect, true);
12336 addUsage(U, O, Use, UK);
12337 }
12338
12339public:
12340 SequenceChecker(Sema &S, Expr *E, SmallVectorImpl<Expr *> &WorkList)
12341 : Base(S.Context), SemaRef(S), Region(Tree.root()), WorkList(WorkList) {
12342 Visit(E);
12343 }
12344
12345 void VisitStmt(Stmt *S) {
12346 // Skip all statements which aren't expressions for now.
12347 }
12348
12349 void VisitExpr(Expr *E) {
12350 // By default, just recurse to evaluated subexpressions.
12351 Base::VisitStmt(E);
12352 }
12353
12354 void VisitCastExpr(CastExpr *E) {
12355 Object O = Object();
12356 if (E->getCastKind() == CK_LValueToRValue)
12357 O = getObject(E->getSubExpr(), false);
12358
12359 if (O)
12360 notePreUse(O, E);
12361 VisitExpr(E);
12362 if (O)
12363 notePostUse(O, E);
12364 }
12365
12366 void VisitSequencedExpressions(Expr *SequencedBefore, Expr *SequencedAfter) {
12367 SequenceTree::Seq BeforeRegion = Tree.allocate(Region);
12368 SequenceTree::Seq AfterRegion = Tree.allocate(Region);
12369 SequenceTree::Seq OldRegion = Region;
12370
12371 {
12372 SequencedSubexpression SeqBefore(*this);
12373 Region = BeforeRegion;
12374 Visit(SequencedBefore);
12375 }
12376
12377 Region = AfterRegion;
12378 Visit(SequencedAfter);
12379
12380 Region = OldRegion;
12381
12382 Tree.merge(BeforeRegion);
12383 Tree.merge(AfterRegion);
12384 }
12385
12386 void VisitArraySubscriptExpr(ArraySubscriptExpr *ASE) {
12387 // C++17 [expr.sub]p1:
12388 // The expression E1[E2] is identical (by definition) to *((E1)+(E2)). The
12389 // expression E1 is sequenced before the expression E2.
12390 if (SemaRef.getLangOpts().CPlusPlus17)
12391 VisitSequencedExpressions(ASE->getLHS(), ASE->getRHS());
12392 else
12393 Base::VisitStmt(ASE);
12394 }
12395
12396 void VisitBinComma(BinaryOperator *BO) {
12397 // C++11 [expr.comma]p1:
12398 // Every value computation and side effect associated with the left
12399 // expression is sequenced before every value computation and side
12400 // effect associated with the right expression.
12401 VisitSequencedExpressions(BO->getLHS(), BO->getRHS());
12402 }
12403
12404 void VisitBinAssign(BinaryOperator *BO) {
12405 // The modification is sequenced after the value computation of the LHS
12406 // and RHS, so check it before inspecting the operands and update the
12407 // map afterwards.
12408 Object O = getObject(BO->getLHS(), true);
12409 if (!O)
12410 return VisitExpr(BO);
12411
12412 notePreMod(O, BO);
12413
12414 // C++11 [expr.ass]p7:
12415 // E1 op= E2 is equivalent to E1 = E1 op E2, except that E1 is evaluated
12416 // only once.
12417 //
12418 // Therefore, for a compound assignment operator, O is considered used
12419 // everywhere except within the evaluation of E1 itself.
12420 if (isa<CompoundAssignOperator>(BO))
12421 notePreUse(O, BO);
12422
12423 Visit(BO->getLHS());
12424
12425 if (isa<CompoundAssignOperator>(BO))
12426 notePostUse(O, BO);
12427
12428 Visit(BO->getRHS());
12429
12430 // C++11 [expr.ass]p1:
12431 // the assignment is sequenced [...] before the value computation of the
12432 // assignment expression.
12433 // C11 6.5.16/3 has no such rule.
12434 notePostMod(O, BO, SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue
12435 : UK_ModAsSideEffect);
12436 }
12437
12438 void VisitCompoundAssignOperator(CompoundAssignOperator *CAO) {
12439 VisitBinAssign(CAO);
12440 }
12441
12442 void VisitUnaryPreInc(UnaryOperator *UO) { VisitUnaryPreIncDec(UO); }
12443 void VisitUnaryPreDec(UnaryOperator *UO) { VisitUnaryPreIncDec(UO); }
12444 void VisitUnaryPreIncDec(UnaryOperator *UO) {
12445 Object O = getObject(UO->getSubExpr(), true);
12446 if (!O)
12447 return VisitExpr(UO);
12448
12449 notePreMod(O, UO);
12450 Visit(UO->getSubExpr());
12451 // C++11 [expr.pre.incr]p1:
12452 // the expression ++x is equivalent to x+=1
12453 notePostMod(O, UO, SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue
12454 : UK_ModAsSideEffect);
12455 }
12456
12457 void VisitUnaryPostInc(UnaryOperator *UO) { VisitUnaryPostIncDec(UO); }
12458 void VisitUnaryPostDec(UnaryOperator *UO) { VisitUnaryPostIncDec(UO); }
12459 void VisitUnaryPostIncDec(UnaryOperator *UO) {
12460 Object O = getObject(UO->getSubExpr(), true);
12461 if (!O)
12462 return VisitExpr(UO);
12463
12464 notePreMod(O, UO);
12465 Visit(UO->getSubExpr());
12466 notePostMod(O, UO, UK_ModAsSideEffect);
12467 }
12468
12469 /// Don't visit the RHS of '&&' or '||' if it might not be evaluated.
12470 void VisitBinLOr(BinaryOperator *BO) {
12471 // The side-effects of the LHS of an '&&' are sequenced before the
12472 // value computation of the RHS, and hence before the value computation
12473 // of the '&&' itself, unless the LHS evaluates to zero. We treat them
12474 // as if they were unconditionally sequenced.
12475 EvaluationTracker Eval(*this);
12476 {
12477 SequencedSubexpression Sequenced(*this);
12478 Visit(BO->getLHS());
12479 }
12480
12481 bool Result;
12482 if (Eval.evaluate(BO->getLHS(), Result)) {
12483 if (!Result)
12484 Visit(BO->getRHS());
12485 } else {
12486 // Check for unsequenced operations in the RHS, treating it as an
12487 // entirely separate evaluation.
12488 //
12489 // FIXME: If there are operations in the RHS which are unsequenced
12490 // with respect to operations outside the RHS, and those operations
12491 // are unconditionally evaluated, diagnose them.
12492 WorkList.push_back(BO->getRHS());
12493 }
12494 }
12495 void VisitBinLAnd(BinaryOperator *BO) {
12496 EvaluationTracker Eval(*this);
12497 {
12498 SequencedSubexpression Sequenced(*this);
12499 Visit(BO->getLHS());
12500 }
12501
12502 bool Result;
12503 if (Eval.evaluate(BO->getLHS(), Result)) {
12504 if (Result)
12505 Visit(BO->getRHS());
12506 } else {
12507 WorkList.push_back(BO->getRHS());
12508 }
12509 }
12510
12511 // Only visit the condition, unless we can be sure which subexpression will
12512 // be chosen.
12513 void VisitAbstractConditionalOperator(AbstractConditionalOperator *CO) {
12514 EvaluationTracker Eval(*this);
12515 {
12516 SequencedSubexpression Sequenced(*this);
12517 Visit(CO->getCond());
12518 }
12519
12520 bool Result;
12521 if (Eval.evaluate(CO->getCond(), Result))
12522 Visit(Result ? CO->getTrueExpr() : CO->getFalseExpr());
12523 else {
12524 WorkList.push_back(CO->getTrueExpr());
12525 WorkList.push_back(CO->getFalseExpr());
12526 }
12527 }
12528
12529 void VisitCallExpr(CallExpr *CE) {
12530 // C++11 [intro.execution]p15:
12531 // When calling a function [...], every value computation and side effect
12532 // associated with any argument expression, or with the postfix expression
12533 // designating the called function, is sequenced before execution of every
12534 // expression or statement in the body of the function [and thus before
12535 // the value computation of its result].
12536 SequencedSubexpression Sequenced(*this);
12537 Base::VisitCallExpr(CE);
12538
12539 // FIXME: CXXNewExpr and CXXDeleteExpr implicitly call functions.
12540 }
12541
12542 void VisitCXXConstructExpr(CXXConstructExpr *CCE) {
12543 // This is a call, so all subexpressions are sequenced before the result.
12544 SequencedSubexpression Sequenced(*this);
12545
12546 if (!CCE->isListInitialization())
12547 return VisitExpr(CCE);
12548
12549 // In C++11, list initializations are sequenced.
12550 SmallVector<SequenceTree::Seq, 32> Elts;
12551 SequenceTree::Seq Parent = Region;
12552 for (CXXConstructExpr::arg_iterator I = CCE->arg_begin(),
12553 E = CCE->arg_end();
12554 I != E; ++I) {
12555 Region = Tree.allocate(Parent);
12556 Elts.push_back(Region);
12557 Visit(*I);
12558 }
12559
12560 // Forget that the initializers are sequenced.
12561 Region = Parent;
12562 for (unsigned I = 0; I < Elts.size(); ++I)
12563 Tree.merge(Elts[I]);
12564 }
12565
12566 void VisitInitListExpr(InitListExpr *ILE) {
12567 if (!SemaRef.getLangOpts().CPlusPlus11)
12568 return VisitExpr(ILE);
12569
12570 // In C++11, list initializations are sequenced.
12571 SmallVector<SequenceTree::Seq, 32> Elts;
12572 SequenceTree::Seq Parent = Region;
12573 for (unsigned I = 0; I < ILE->getNumInits(); ++I) {
12574 Expr *E = ILE->getInit(I);
12575 if (!E) continue;
12576 Region = Tree.allocate(Parent);
12577 Elts.push_back(Region);
12578 Visit(E);
12579 }
12580
12581 // Forget that the initializers are sequenced.
12582 Region = Parent;
12583 for (unsigned I = 0; I < Elts.size(); ++I)
12584 Tree.merge(Elts[I]);
12585 }
12586};
12587
12588} // namespace
12589
12590void Sema::CheckUnsequencedOperations(Expr *E) {
12591 SmallVector<Expr *, 8> WorkList;
12592 WorkList.push_back(E);
12593 while (!WorkList.empty()) {
12594 Expr *Item = WorkList.pop_back_val();
12595 SequenceChecker(*this, Item, WorkList);
12596 }
12597}
12598
// Run the whole-expression checks that only make sense once the complete
// expression has been built: implicit-conversion warnings, unsequenced
// modification warnings, and integer-overflow warnings.
void Sema::CheckCompletedExpr(Expr *E, SourceLocation CheckLoc,
                              bool IsConstexpr) {
  CheckImplicitConversions(E, CheckLoc);
  // Unsequenced-operation analysis needs a non-instantiation-dependent tree.
  if (!E->isInstantiationDependent())
    CheckUnsequencedOperations(E);
  // Skipped for constexpr contexts (presumably overflow is diagnosed during
  // constant evaluation there) and for value-dependent expressions, which
  // cannot be evaluated yet.
  if (!IsConstexpr && !E->isValueDependent())
    CheckForIntOverflow(E);
  DiagnoseMisalignedMembers();
}
12608
12609void Sema::CheckBitFieldInitialization(SourceLocation InitLoc,
12610 FieldDecl *BitField,
12611 Expr *Init) {
12612 (void) AnalyzeBitFieldAssignment(*this, BitField, Init, InitLoc);
12613}
12614
12615static void diagnoseArrayStarInParamType(Sema &S, QualType PType,
12616 SourceLocation Loc) {
12617 if (!PType->isVariablyModifiedType())
12618 return;
12619 if (const auto *PointerTy = dyn_cast<PointerType>(PType)) {
12620 diagnoseArrayStarInParamType(S, PointerTy->getPointeeType(), Loc);
12621 return;
12622 }
12623 if (const auto *ReferenceTy = dyn_cast<ReferenceType>(PType)) {
12624 diagnoseArrayStarInParamType(S, ReferenceTy->getPointeeType(), Loc);
12625 return;
12626 }
12627 if (const auto *ParenTy = dyn_cast<ParenType>(PType)) {
12628 diagnoseArrayStarInParamType(S, ParenTy->getInnerType(), Loc);
12629 return;
12630 }
12631
12632 const ArrayType *AT = S.Context.getAsArrayType(PType);
12633 if (!AT)
12634 return;
12635
12636 if (AT->getSizeModifier() != ArrayType::Star) {
12637 diagnoseArrayStarInParamType(S, AT->getElementType(), Loc);
12638 return;
12639 }
12640
12641 S.Diag(Loc, diag::err_array_star_in_function_definition);
12642}
12643
/// CheckParmsForFunctionDef - Check that the parameters of the given
/// function are appropriate for the definition of a function. This
/// takes care of any checks that cannot be performed on the
/// declaration itself, e.g., that the types of each of the function
/// parameters are complete.
///
/// \param Parameters the function's parameters.
/// \param CheckParameterNames if true, diagnose unnamed parameters (required
///        for C function definitions; C99 6.9.1p5).
/// \returns true if at least one parameter was marked invalid.
bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
                                    bool CheckParameterNames) {
  bool HasInvalidParm = false;
  for (ParmVarDecl *Param : Parameters) {
    // C99 6.7.5.3p4: the parameters in a parameter type list in a
    // function declarator that is part of a function definition of
    // that function shall not have incomplete type.
    //
    // This is also C++ [dcl.fct]p6.
    if (!Param->isInvalidDecl() &&
        RequireCompleteType(Param->getLocation(), Param->getType(),
                            diag::err_typecheck_decl_incomplete_type)) {
      Param->setInvalidDecl();
      HasInvalidParm = true;
    }

    // C99 6.9.1p5: If the declarator includes a parameter type list, the
    // declaration of each parameter shall include an identifier.
    // (Implicit parameters and C++ parameters are exempt.)
    if (CheckParameterNames &&
        Param->getIdentifier() == nullptr &&
        !Param->isImplicit() &&
        !getLangOpts().CPlusPlus)
      Diag(Param->getLocation(), diag::err_parameter_name_omitted);

    // C99 6.7.5.3p12:
    //   If the function declarator is not part of a definition of that
    //   function, parameters may have incomplete type and may use the [*]
    //   notation in their sequences of declarator specifiers to specify
    //   variable length array types.
    // Since we are at a definition here, a [*] anywhere in the type is
    // an error.
    QualType PType = Param->getOriginalType();
    // FIXME: This diagnostic should point the '[*]' if source-location
    // information is added for it.
    diagnoseArrayStarInParamType(*this, PType, Param->getLocation());

    // If the parameter is a c++ class type and it has to be destructed in the
    // callee function, declare the destructor so that it can be called by the
    // callee function. Do not perform any direct access check on the dtor here.
    if (!Param->isInvalidDecl()) {
      if (CXXRecordDecl *ClassDecl = Param->getType()->getAsCXXRecordDecl()) {
        if (!ClassDecl->isInvalidDecl() &&
            !ClassDecl->hasIrrelevantDestructor() &&
            !ClassDecl->isDependentContext() &&
            ClassDecl->isParamDestroyedInCallee()) {
          CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl);
          MarkFunctionReferenced(Param->getLocation(), Destructor);
          DiagnoseUseOfDecl(Destructor, Param->getLocation());
        }
      }
    }

    // Parameters with the pass_object_size attribute only need to be marked
    // constant at function definitions. Because we lack information about
    // whether we're on a declaration or definition when we're instantiating the
    // attribute, we need to check for constness here.
    if (const auto *Attr = Param->getAttr<PassObjectSizeAttr>())
      if (!Param->getType().isConstQualified())
        Diag(Param->getLocation(), diag::err_attribute_pointers_only)
            << Attr->getSpelling() << 1;

    // Check for parameter names shadowing fields from the class.
    if (LangOpts.CPlusPlus && !Param->isInvalidDecl()) {
      // The owning context for the parameter should be the function, but we
      // want to see if this function's declaration context is a record.
      DeclContext *DC = Param->getDeclContext();
      if (DC && DC->isFunctionOrMethod()) {
        if (auto *RD = dyn_cast<CXXRecordDecl>(DC->getParent()))
          CheckShadowInheritedFields(Param->getLocation(), Param->getDeclName(),
                                     RD, /*DeclIsField*/ false);
      }
    }
  }

  return HasInvalidParm;
}
12723
12724/// A helper function to get the alignment of a Decl referred to by DeclRefExpr
12725/// or MemberExpr.
12726static CharUnits getDeclAlign(Expr *E, CharUnits TypeAlign,
12727 ASTContext &Context) {
12728 if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
12729 return Context.getDeclAlign(DRE->getDecl());
12730
12731 if (const auto *ME = dyn_cast<MemberExpr>(E))
12732 return Context.getDeclAlign(ME->getMemberDecl());
12733
12734 return TypeAlign;
12735}
12736
12737/// CheckCastAlign - Implements -Wcast-align, which warns when a
12738/// pointer cast increases the alignment requirements.
12739void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) {
12740 // This is actually a lot of work to potentially be doing on every
12741 // cast; don't do it if we're ignoring -Wcast_align (as is the default).
12742 if (getDiagnostics().isIgnored(diag::warn_cast_align, TRange.getBegin()))
12743 return;
12744
12745 // Ignore dependent types.
12746 if (T->isDependentType() || Op->getType()->isDependentType())
12747 return;
12748
12749 // Require that the destination be a pointer type.
12750 const PointerType *DestPtr = T->getAs<PointerType>();
12751 if (!DestPtr) return;
12752
12753 // If the destination has alignment 1, we're done.
12754 QualType DestPointee = DestPtr->getPointeeType();
12755 if (DestPointee->isIncompleteType()) return;
12756 CharUnits DestAlign = Context.getTypeAlignInChars(DestPointee);
12757 if (DestAlign.isOne()) return;
12758
12759 // Require that the source be a pointer type.
12760 const PointerType *SrcPtr = Op->getType()->getAs<PointerType>();
12761 if (!SrcPtr) return;
12762 QualType SrcPointee = SrcPtr->getPointeeType();
12763
12764 // Whitelist casts from cv void*. We already implicitly
12765 // whitelisted casts to cv void*, since they have alignment 1.
12766 // Also whitelist casts involving incomplete types, which implicitly
12767 // includes 'void'.
12768 if (SrcPointee->isIncompleteType()) return;
12769
12770 CharUnits SrcAlign = Context.getTypeAlignInChars(SrcPointee);
12771
12772 if (auto *CE = dyn_cast<CastExpr>(Op)) {
12773 if (CE->getCastKind() == CK_ArrayToPointerDecay)
12774 SrcAlign = getDeclAlign(CE->getSubExpr(), SrcAlign, Context);
12775 } else if (auto *UO = dyn_cast<UnaryOperator>(Op)) {
12776 if (UO->getOpcode() == UO_AddrOf)
12777 SrcAlign = getDeclAlign(UO->getSubExpr(), SrcAlign, Context);
12778 }
12779
12780 if (SrcAlign >= DestAlign) return;
12781
12782 Diag(TRange.getBegin(), diag::warn_cast_align)
12783 << Op->getType() << T
12784 << static_cast<unsigned>(SrcAlign.getQuantity())
12785 << static_cast<unsigned>(DestAlign.getQuantity())
12786 << TRange << Op->getSourceRange();
12787}
12788
/// Check whether this array fits the idiom of a size-one tail padded
/// array member of a struct.
///
/// We avoid emitting out-of-bounds access warnings for such arrays as they are
/// commonly used to emulate flexible arrays in C89 code.
///
/// \param Size the constant array bound of the accessed array.
/// \param ND the declaration the base expression refers to, if known.
static bool IsTailPaddedMemberArray(Sema &S, const llvm::APInt &Size,
                                    const NamedDecl *ND) {
  // The idiom only applies to arrays of exactly one element that are fields.
  if (Size != 1 || !ND) return false;

  const FieldDecl *FD = dyn_cast<FieldDecl>(ND);
  if (!FD) return false;

  // Don't consider sizes resulting from macro expansions or template argument
  // substitution to form C89 tail-padded arrays.

  TypeSourceInfo *TInfo = FD->getTypeSourceInfo();
  while (TInfo) {
    TypeLoc TL = TInfo->getTypeLoc();
    // Look through typedefs.
    if (TypedefTypeLoc TTL = TL.getAs<TypedefTypeLoc>()) {
      const TypedefNameDecl *TDL = TTL.getTypedefNameDecl();
      TInfo = TDL->getTypeSourceInfo();
      continue;
    }
    if (ConstantArrayTypeLoc CTL = TL.getAs<ConstantArrayTypeLoc>()) {
      // A size that is not a plain integer literal, or that was written in a
      // macro, is not treated as the C89 flexible-array idiom.
      const Expr *SizeExpr = dyn_cast<IntegerLiteral>(CTL.getSizeExpr());
      if (!SizeExpr || SizeExpr->getExprLoc().isMacroID())
        return false;
    }
    break;
  }

  // The enclosing record must be a non-union; C++ classes must additionally
  // be standard-layout for the tail-padding reasoning to hold.
  const RecordDecl *RD = dyn_cast<RecordDecl>(FD->getDeclContext());
  if (!RD) return false;
  if (RD->isUnion()) return false;
  if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
    if (!CRD->isStandardLayout()) return false;
  }

  // See if this is the last field decl in the record.
  const Decl *D = FD;
  while ((D = D->getNextDeclInContext()))
    if (isa<FieldDecl>(D))
      return false;
  return true;
}
12835
/// Check a single constant-index access against the bounds of a
/// constant-sized array, diagnosing indices that lie outside it.
///
/// \param BaseExpr the base of the access (array or pointer).
/// \param IndexExpr the index (or pointer-arithmetic offset) expression.
/// \param ASE the subscript expression itself, or null when checking pointer
///        arithmetic rather than an explicit subscript.
/// \param AllowOnePastEnd whether an index equal to the size is permitted
///        (forming a one-past-the-end address is valid).
/// \param IndexNegated whether the index appeared under a negation, so its
///        evaluated value must be flipped before comparing.
void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
                            const ArraySubscriptExpr *ASE,
                            bool AllowOnePastEnd, bool IndexNegated) {
  IndexExpr = IndexExpr->IgnoreParenImpCasts();
  if (IndexExpr->isValueDependent())
    return;

  // The type the pointer arithmetic is effectively performed in, taken
  // before stripping casts off the base.
  const Type *EffectiveType =
      BaseExpr->getType()->getPointeeOrArrayElementType();
  BaseExpr = BaseExpr->IgnoreParenCasts();
  const ConstantArrayType *ArrayTy =
      Context.getAsConstantArrayType(BaseExpr->getType());

  // Only constant-sized arrays can be bounds-checked here.
  if (!ArrayTy)
    return;

  const Type *BaseType = ArrayTy->getElementType().getTypePtr();
  if (EffectiveType->isDependentType() || BaseType->isDependentType())
    return;

  // The index must fold to a constant for a compile-time check.
  Expr::EvalResult Result;
  if (!IndexExpr->EvaluateAsInt(Result, Context, Expr::SE_AllowSideEffects))
    return;

  llvm::APSInt index = Result.Val.getInt();
  if (IndexNegated)
    index = -index;

  // Identify the declaration being accessed, if any, for the note and the
  // tail-padded-member-array exemption.
  const NamedDecl *ND = nullptr;
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr))
    ND = DRE->getDecl();
  if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr))
    ND = ME->getMemberDecl();

  if (index.isUnsigned() || !index.isNegative()) {
    // It is possible that the type of the base expression after
    // IgnoreParenCasts is incomplete, even though the type of the base
    // expression before IgnoreParenCasts is complete (see PR39746 for an
    // example). In this case we have no information about whether the array
    // access exceeds the array bounds. However we can still diagnose an array
    // access which precedes the array bounds.
    if (BaseType->isIncompleteType())
      return;

    llvm::APInt size = ArrayTy->getSize();
    if (!size.isStrictlyPositive())
      return;

    if (BaseType != EffectiveType) {
      // Make sure we're comparing apples to apples when comparing index to size
      uint64_t ptrarith_typesize = Context.getTypeSize(EffectiveType);
      uint64_t array_typesize = Context.getTypeSize(BaseType);
      // Handle ptrarith_typesize being zero, such as when casting to void*
      if (!ptrarith_typesize) ptrarith_typesize = 1;
      if (ptrarith_typesize != array_typesize) {
        // There's a cast to a different size type involved
        uint64_t ratio = array_typesize / ptrarith_typesize;
        // TODO: Be smarter about handling cases where array_typesize is not a
        // multiple of ptrarith_typesize
        if (ptrarith_typesize * ratio == array_typesize)
          size *= llvm::APInt(size.getBitWidth(), ratio);
      }
    }

    // Widen the narrower of index/size so they can be compared directly.
    if (size.getBitWidth() > index.getBitWidth())
      index = index.zext(size.getBitWidth());
    else if (size.getBitWidth() < index.getBitWidth())
      size = size.zext(index.getBitWidth());

    // For array subscripting the index must be less than size, but for pointer
    // arithmetic also allow the index (offset) to be equal to size since
    // computing the next address after the end of the array is legal and
    // commonly done e.g. in C++ iterators and range-based for loops.
    if (AllowOnePastEnd ? index.ule(size) : index.ult(size))
      return;

    // Also don't warn for arrays of size 1 which are members of some
    // structure. These are often used to approximate flexible arrays in C89
    // code.
    if (IsTailPaddedMemberArray(*this, size, ND))
      return;

    // Suppress the warning if the subscript expression (as identified by the
    // ']' location) and the index expression are both from macro expansions
    // within a system header.
    if (ASE) {
      SourceLocation RBracketLoc = SourceMgr.getSpellingLoc(
          ASE->getRBracketLoc());
      if (SourceMgr.isInSystemHeader(RBracketLoc)) {
        SourceLocation IndexLoc =
            SourceMgr.getSpellingLoc(IndexExpr->getBeginLoc());
        if (SourceMgr.isWrittenInSameFile(RBracketLoc, IndexLoc))
          return;
      }
    }

    unsigned DiagID = diag::warn_ptr_arith_exceeds_bounds;
    if (ASE)
      DiagID = diag::warn_array_index_exceeds_bounds;

    DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr,
                        PDiag(DiagID) << index.toString(10, true)
                                      << size.toString(10, true)
                                      << (unsigned)size.getLimitedValue(~0U)
                                      << IndexExpr->getSourceRange());
  } else {
    // Negative index: this always precedes the array bounds.
    unsigned DiagID = diag::warn_array_index_precedes_bounds;
    if (!ASE) {
      DiagID = diag::warn_ptr_arith_precedes_bounds;
      if (index.isNegative()) index = -index;
    }

    DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr,
                        PDiag(DiagID) << index.toString(10, true)
                                      << IndexExpr->getSourceRange());
  }

  if (!ND) {
    // Try harder to find a NamedDecl to point at in the note.
    while (const ArraySubscriptExpr *ASE =
               dyn_cast<ArraySubscriptExpr>(BaseExpr))
      BaseExpr = ASE->getBase()->IgnoreParenCasts();
    if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr))
      ND = DRE->getDecl();
    if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr))
      ND = ME->getMemberDecl();
  }

  // Point at the accessed declaration in a follow-up note, when known.
  if (ND)
    DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr,
                        PDiag(diag::note_array_index_out_of_bounds)
                            << ND->getDeclName());
}
12969
/// Walk an expression looking for array subscripts (and OpenMP array
/// sections) whose constant index can be checked against the array bounds.
void Sema::CheckArrayAccess(const Expr *expr) {
  // Net count of address-of over dereference operators seen so far: a
  // positive count (&a[n]) permits an index one past the end of the array.
  int AllowOnePastEnd = 0;
  while (expr) {
    expr = expr->IgnoreParenImpCasts();
    switch (expr->getStmtClass()) {
      case Stmt::ArraySubscriptExprClass: {
        const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(expr);
        CheckArrayAccess(ASE->getBase(), ASE->getIdx(), ASE,
                         AllowOnePastEnd > 0);
        // Continue checking the base, which may itself be a subscript.
        expr = ASE->getBase();
        break;
      }
      case Stmt::MemberExprClass: {
        expr = cast<MemberExpr>(expr)->getBase();
        break;
      }
      case Stmt::OMPArraySectionExprClass: {
        const OMPArraySectionExpr *ASE = cast<OMPArraySectionExpr>(expr);
        if (ASE->getLowerBound())
          CheckArrayAccess(ASE->getBase(), ASE->getLowerBound(),
                           /*ASE=*/nullptr, AllowOnePastEnd > 0);
        return;
      }
      case Stmt::UnaryOperatorClass: {
        // Only unwrap the * and & unary operators
        const UnaryOperator *UO = cast<UnaryOperator>(expr);
        expr = UO->getSubExpr();
        switch (UO->getOpcode()) {
          case UO_AddrOf:
            AllowOnePastEnd++;
            break;
          case UO_Deref:
            AllowOnePastEnd--;
            break;
          default:
            return;
        }
        break;
      }
      case Stmt::ConditionalOperatorClass: {
        // Check both arms independently.
        const ConditionalOperator *cond = cast<ConditionalOperator>(expr);
        if (const Expr *lhs = cond->getLHS())
          CheckArrayAccess(lhs);
        if (const Expr *rhs = cond->getRHS())
          CheckArrayAccess(rhs);
        return;
      }
      case Stmt::CXXOperatorCallExprClass: {
        // Recurse into each argument of an overloaded operator call.
        const auto *OCE = cast<CXXOperatorCallExpr>(expr);
        for (const auto *Arg : OCE->arguments())
          CheckArrayAccess(Arg);
        return;
      }
      default:
        return;
    }
  }
}
13028
13029//===--- CHECK: Objective-C retain cycles ----------------------------------//
13030
13031namespace {
13032
13033struct RetainCycleOwner {
13034 VarDecl *Variable = nullptr;
13035 SourceRange Range;
13036 SourceLocation Loc;
13037 bool Indirect = false;
13038
13039 RetainCycleOwner() = default;
13040
13041 void setLocsFrom(Expr *e) {
13042 Loc = e->getExprLoc();
13043 Range = e->getSourceRange();
13044 }
13045};
13046
13047} // namespace
13048
13049/// Consider whether capturing the given variable can possibly lead to
13050/// a retain cycle.
13051static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) {
13052 // In ARC, it's captured strongly iff the variable has __strong
13053 // lifetime. In MRR, it's captured strongly if the variable is
13054 // __block and has an appropriate type.
13055 if (var->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
13056 return false;
13057
13058 owner.Variable = var;
13059 if (ref)
13060 owner.setLocsFrom(ref);
13061 return true;
13062}
13063
/// Walk \p e looking for a variable that strongly owns the value it
/// produces, looking through casts, strong ivars, member accesses, and
/// retaining property references. On success, fills in \p owner and
/// returns true.
static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) {
  while (true) {
    e = e->IgnoreParens();
    if (CastExpr *cast = dyn_cast<CastExpr>(e)) {
      // Only look through casts that preserve the underlying object.
      switch (cast->getCastKind()) {
      case CK_BitCast:
      case CK_LValueBitCast:
      case CK_LValueToRValue:
      case CK_ARCReclaimReturnedObject:
        e = cast->getSubExpr();
        continue;

      default:
        return false;
      }
    }

    if (ObjCIvarRefExpr *ref = dyn_cast<ObjCIvarRefExpr>(e)) {
      // Only __strong ivars participate in ownership.
      ObjCIvarDecl *ivar = ref->getDecl();
      if (ivar->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
        return false;

      // Try to find a retain cycle in the base.
      if (!findRetainCycleOwner(S, ref->getBase(), owner))
        return false;

      if (ref->isFreeIvar()) owner.setLocsFrom(ref);
      // Ownership through an ivar is one step removed from the variable.
      owner.Indirect = true;
      return true;
    }

    if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e)) {
      VarDecl *var = dyn_cast<VarDecl>(ref->getDecl());
      if (!var) return false;
      return considerVariable(var, ref, owner);
    }

    if (MemberExpr *member = dyn_cast<MemberExpr>(e)) {
      if (member->isArrow()) return false;

      // Don't count this as an indirect ownership.
      e = member->getBase();
      continue;
    }

    if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) {
      // Only pay attention to pseudo-objects on property references.
      ObjCPropertyRefExpr *pre
        = dyn_cast<ObjCPropertyRefExpr>(pseudo->getSyntacticForm()
                                          ->IgnoreParens());
      if (!pre) return false;
      if (pre->isImplicitProperty()) return false;
      // The property must retain its value (directly, or via a __strong
      // backing ivar) for the receiver to own it.
      ObjCPropertyDecl *property = pre->getExplicitProperty();
      if (!property->isRetaining() &&
          !(property->getPropertyIvarDecl() &&
            property->getPropertyIvarDecl()->getType()
              .getObjCLifetime() == Qualifiers::OCL_Strong))
          return false;

      owner.Indirect = true;
      if (pre->isSuperReceiver()) {
        // 'super.prop' is owned by the current method's 'self'.
        owner.Variable = S.getCurMethodDecl()->getSelfDecl();
        if (!owner.Variable)
          return false;
        owner.Loc = pre->getLocation();
        owner.Range = pre->getSourceRange();
        return true;
      }
      // Continue the walk on the property's base expression.
      e = const_cast<Expr*>(cast<OpaqueValueExpr>(pre->getBase())
                              ->getSourceExpr());
      continue;
    }

    // Array ivars?

    return false;
  }
}
13142
13143namespace {
13144
  /// Searches a block body for an expression that captures \c Variable,
  /// recording the first capturing reference found in \c Capturer.
  struct FindCaptureVisitor : EvaluatedExprVisitor<FindCaptureVisitor> {
    ASTContext &Context;
    // The variable whose capture we are looking for.
    VarDecl *Variable;
    // First expression found that references (captures) Variable.
    Expr *Capturer = nullptr;
    // True if the block assigns an integer-constant 0 (i.e. nil) to the
    // variable, which suggests the reference will be cleared.
    bool VarWillBeReased = false;

    FindCaptureVisitor(ASTContext &Context, VarDecl *variable)
        : EvaluatedExprVisitor<FindCaptureVisitor>(Context),
          Context(Context), Variable(variable) {}

    void VisitDeclRefExpr(DeclRefExpr *ref) {
      // A direct reference to the variable is a capture; keep the first one.
      if (ref->getDecl() == Variable && !Capturer)
        Capturer = ref;
    }

    void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) {
      if (Capturer) return;
      Visit(ref->getBase());
      // A free ivar reference implicitly captures 'self'; if the base walk
      // found the variable, report the ivar reference itself.
      if (Capturer && ref->isFreeIvar())
        Capturer = ref;
    }

    void VisitBlockExpr(BlockExpr *block) {
      // Look inside nested blocks
      if (block->getBlockDecl()->capturesVariable(Variable))
        Visit(block->getBlockDecl()->getBody());
    }

    void VisitOpaqueValueExpr(OpaqueValueExpr *OVE) {
      if (Capturer) return;
      if (OVE->getSourceExpr())
        Visit(OVE->getSourceExpr());
    }

    void VisitBinaryOperator(BinaryOperator *BinOp) {
      // Detect 'Variable = 0' assignments anywhere in the block.
      if (!Variable || VarWillBeReased || BinOp->getOpcode() != BO_Assign)
        return;
      Expr *LHS = BinOp->getLHS();
      if (const DeclRefExpr *DRE = dyn_cast_or_null<DeclRefExpr>(LHS)) {
        if (DRE->getDecl() != Variable)
          return;
        if (Expr *RHS = BinOp->getRHS()) {
          RHS = RHS->IgnoreParenCasts();
          llvm::APSInt Value;
          VarWillBeReased =
            (RHS && RHS->isIntegerConstantExpr(Value, Context) && Value == 0);
        }
      }
    }
  };
13195
13196} // namespace
13197
/// Check whether the given argument is a block which captures a
/// variable.
///
/// Returns the expression within the block that captures \p owner.Variable,
/// or null if there is no such capture (or the block resets the variable to
/// nil, breaking the would-be cycle).
static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) {
  assert(owner.Variable && owner.Loc.isValid());

  e = e->IgnoreParenCasts();

  // Look through [^{...} copy] and Block_copy(^{...}).
  if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(e)) {
    Selector Cmd = ME->getSelector();
    if (Cmd.isUnarySelector() && Cmd.getNameForSlot(0) == "copy") {
      e = ME->getInstanceReceiver();
      if (!e)
        return nullptr;
      e = e->IgnoreParenCasts();
    }
  } else if (CallExpr *CE = dyn_cast<CallExpr>(e)) {
    // Recognize the one-argument _Block_copy(...) form.
    if (CE->getNumArgs() == 1) {
      FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(CE->getCalleeDecl());
      if (Fn) {
        const IdentifierInfo *FnI = Fn->getIdentifier();
        if (FnI && FnI->isStr("_Block_copy")) {
          e = CE->getArg(0)->IgnoreParenCasts();
        }
      }
    }
  }

  // Only a block literal that actually captures the owner can form a cycle.
  BlockExpr *block = dyn_cast<BlockExpr>(e);
  if (!block || !block->getBlockDecl()->capturesVariable(owner.Variable))
    return nullptr;

  // Find the precise capturing expression inside the block body; suppress
  // the result if the block nils out the variable.
  FindCaptureVisitor visitor(S.Context, owner.Variable);
  visitor.Visit(block->getBlockDecl()->getBody());
  return visitor.VarWillBeReased ? nullptr : visitor.Capturer;
}
13234
13235static void diagnoseRetainCycle(Sema &S, Expr *capturer,
13236 RetainCycleOwner &owner) {
13237 assert(capturer);
13238 assert(owner.Variable && owner.Loc.isValid());
13239
13240 S.Diag(capturer->getExprLoc(), diag::warn_arc_retain_cycle)
13241 << owner.Variable << capturer->getSourceRange();
13242 S.Diag(owner.Loc, diag::note_arc_retain_cycle_owner)
13243 << owner.Indirect << owner.Range;
13244}
13245
13246/// Check for a keyword selector that starts with the word 'add' or
13247/// 'set'.
13248static bool isSetterLikeSelector(Selector sel) {
13249 if (sel.isUnarySelector()) return false;
13250
13251 StringRef str = sel.getNameForSlot(0);
13252 while (!str.empty() && str.front() == '_') str = str.substr(1);
13253 if (str.startswith("set"))
13254 str = str.substr(3);
13255 else if (str.startswith("add")) {
13256 // Specially whitelist 'addOperationWithBlock:'.
13257 if (sel.getNumArgs() == 1 && str.startswith("addOperationWithBlock"))
13258 return false;
13259 str = str.substr(3);
13260 }
13261 else
13262 return false;
13263
13264 if (str.empty()) return true;
13265 return !isLowercase(str.front());
13266}
13267
13268static Optional<int> GetNSMutableArrayArgumentIndex(Sema &S,
13269 ObjCMessageExpr *Message) {
13270 bool IsMutableArray = S.NSAPIObj->isSubclassOfNSClass(
13271 Message->getReceiverInterface(),
13272 NSAPI::ClassId_NSMutableArray);
13273 if (!IsMutableArray) {
13274 return None;
13275 }
13276
13277 Selector Sel = Message->getSelector();
13278
13279 Optional<NSAPI::NSArrayMethodKind> MKOpt =
13280 S.NSAPIObj->getNSArrayMethodKind(Sel);
13281 if (!MKOpt) {
13282 return None;
13283 }
13284
13285 NSAPI::NSArrayMethodKind MK = *MKOpt;
13286
13287 switch (MK) {
13288 case NSAPI::NSMutableArr_addObject:
13289 case NSAPI::NSMutableArr_insertObjectAtIndex:
13290 case NSAPI::NSMutableArr_setObjectAtIndexedSubscript:
13291 return 0;
13292 case NSAPI::NSMutableArr_replaceObjectAtIndex:
13293 return 1;
13294
13295 default:
13296 return None;
13297 }
13298
13299 return None;
13300}
13301
13302static
13303Optional<int> GetNSMutableDictionaryArgumentIndex(Sema &S,
13304 ObjCMessageExpr *Message) {
13305 bool IsMutableDictionary = S.NSAPIObj->isSubclassOfNSClass(
13306 Message->getReceiverInterface(),
13307 NSAPI::ClassId_NSMutableDictionary);
13308 if (!IsMutableDictionary) {
13309 return None;
13310 }
13311
13312 Selector Sel = Message->getSelector();
13313
13314 Optional<NSAPI::NSDictionaryMethodKind> MKOpt =
13315 S.NSAPIObj->getNSDictionaryMethodKind(Sel);
13316 if (!MKOpt) {
13317 return None;
13318 }
13319
13320 NSAPI::NSDictionaryMethodKind MK = *MKOpt;
13321
13322 switch (MK) {
13323 case NSAPI::NSMutableDict_setObjectForKey:
13324 case NSAPI::NSMutableDict_setValueForKey:
13325 case NSAPI::NSMutableDict_setObjectForKeyedSubscript:
13326 return 0;
13327
13328 default:
13329 return None;
13330 }
13331
13332 return None;
13333}
13334
13335static Optional<int> GetNSSetArgumentIndex(Sema &S, ObjCMessageExpr *Message) {
13336 bool IsMutableSet = S.NSAPIObj->isSubclassOfNSClass(
13337 Message->getReceiverInterface(),
13338 NSAPI::ClassId_NSMutableSet);
13339
13340 bool IsMutableOrderedSet = S.NSAPIObj->isSubclassOfNSClass(
13341 Message->getReceiverInterface(),
13342 NSAPI::ClassId_NSMutableOrderedSet);
13343 if (!IsMutableSet && !IsMutableOrderedSet) {
13344 return None;
13345 }
13346
13347 Selector Sel = Message->getSelector();
13348
13349 Optional<NSAPI::NSSetMethodKind> MKOpt = S.NSAPIObj->getNSSetMethodKind(Sel);
13350 if (!MKOpt) {
13351 return None;
13352 }
13353
13354 NSAPI::NSSetMethodKind MK = *MKOpt;
13355
13356 switch (MK) {
13357 case NSAPI::NSMutableSet_addObject:
13358 case NSAPI::NSOrderedSet_setObjectAtIndex:
13359 case NSAPI::NSOrderedSet_setObjectAtIndexedSubscript:
13360 case NSAPI::NSOrderedSet_insertObjectAtIndex:
13361 return 0;
13362 case NSAPI::NSOrderedSet_replaceObjectAtIndexWithObject:
13363 return 1;
13364 }
13365
13366 return None;
13367}
13368
/// Warn when a mutable Cocoa container is asked to store itself (for example
/// [array addObject:array]), which creates a circular container.
void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) {
  if (!Message->isInstanceMessage()) {
    return;
  }

  // Determine which argument (if any) would be stored into the container.
  Optional<int> ArgOpt;

  if (!(ArgOpt = GetNSMutableArrayArgumentIndex(*this, Message)) &&
      !(ArgOpt = GetNSMutableDictionaryArgumentIndex(*this, Message)) &&
      !(ArgOpt = GetNSSetArgumentIndex(*this, Message))) {
    return;
  }

  int ArgIndex = *ArgOpt;

  // Look through implicit casts and opaque values to the underlying argument.
  Expr *Arg = Message->getArg(ArgIndex)->IgnoreImpCasts();
  if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Arg)) {
    Arg = OE->getSourceExpr()->IgnoreImpCasts();
  }

  if (Message->getReceiverKind() == ObjCMessageExpr::SuperInstance) {
    // [super addObject:self] stores the receiver into itself.
    if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) {
      if (ArgRE->isObjCSelfExpr()) {
        Diag(Message->getSourceRange().getBegin(),
             diag::warn_objc_circular_container)
            << ArgRE->getDecl() << StringRef("'super'");
      }
    }
  } else {
    Expr *Receiver = Message->getInstanceReceiver()->IgnoreImpCasts();

    if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Receiver)) {
      Receiver = OE->getSourceExpr()->IgnoreImpCasts();
    }

    if (DeclRefExpr *ReceiverRE = dyn_cast<DeclRefExpr>(Receiver)) {
      // Receiver and argument name the same variable.
      if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) {
        if (ReceiverRE->getDecl() == ArgRE->getDecl()) {
          ValueDecl *Decl = ReceiverRE->getDecl();
          Diag(Message->getSourceRange().getBegin(),
               diag::warn_objc_circular_container)
              << Decl << Decl;
          // 'self' needs no note; it is obvious where it was declared.
          if (!ArgRE->isObjCSelfExpr()) {
            Diag(Decl->getLocation(),
                 diag::note_objc_circular_container_declared_here)
                << Decl;
          }
        }
      }
    } else if (ObjCIvarRefExpr *IvarRE = dyn_cast<ObjCIvarRefExpr>(Receiver)) {
      // Receiver and argument name the same instance variable.
      if (ObjCIvarRefExpr *IvarArgRE = dyn_cast<ObjCIvarRefExpr>(Arg)) {
        if (IvarRE->getDecl() == IvarArgRE->getDecl()) {
          ObjCIvarDecl *Decl = IvarRE->getDecl();
          Diag(Message->getSourceRange().getBegin(),
               diag::warn_objc_circular_container)
              << Decl << Decl;
          Diag(Decl->getLocation(),
               diag::note_objc_circular_container_declared_here)
              << Decl;
        }
      }
    }
  }
}
13433
/// Check a message send to see if it's likely to cause a retain cycle.
///
/// Looks for setter-like selectors whose receiver is strongly owned by some
/// variable and whose argument is a block capturing that same variable.
void Sema::checkRetainCycles(ObjCMessageExpr *msg) {
  // Only check instance methods whose selector looks like a setter.
  if (!msg->isInstanceMessage() || !isSetterLikeSelector(msg->getSelector()))
    return;

  // Try to find a variable that the receiver is strongly owned by.
  RetainCycleOwner owner;
  if (msg->getReceiverKind() == ObjCMessageExpr::Instance) {
    if (!findRetainCycleOwner(*this, msg->getInstanceReceiver(), owner))
      return;
  } else {
    // A 'super' receiver is owned by the current method's 'self'.
    assert(msg->getReceiverKind() == ObjCMessageExpr::SuperInstance);
    owner.Variable = getCurMethodDecl()->getSelfDecl();
    owner.Loc = msg->getSuperLoc();
    owner.Range = msg->getSuperLoc();
  }

  // Check whether the receiver is captured by any of the arguments.
  const ObjCMethodDecl *MD = msg->getMethodDecl();
  for (unsigned i = 0, e = msg->getNumArgs(); i != e; ++i) {
    if (Expr *capturer = findCapturingExpr(*this, msg->getArg(i), owner)) {
      // noescape blocks should not be retained by the method.
      if (MD && MD->parameters()[i]->hasAttr<NoEscapeAttr>())
        continue;
      return diagnoseRetainCycle(*this, capturer, owner);
    }
  }
}
13463
13464/// Check a property assign to see if it's likely to cause a retain cycle.
13465void Sema::checkRetainCycles(Expr *receiver, Expr *argument) {
13466 RetainCycleOwner owner;
13467 if (!findRetainCycleOwner(*this, receiver, owner))
13468 return;
13469
13470 if (Expr *capturer = findCapturingExpr(*this, argument, owner))
13471 diagnoseRetainCycle(*this, capturer, owner);
13472}
13473
13474void Sema::checkRetainCycles(VarDecl *Var, Expr *Init) {
13475 RetainCycleOwner Owner;
13476 if (!considerVariable(Var, /*DeclRefExpr=*/nullptr, Owner))
13477 return;
13478
13479 // Because we don't have an expression for the variable, we have to set the
13480 // location explicitly here.
13481 Owner.Loc = Var->getLocation();
13482 Owner.Range = Var->getSourceRange();
13483
13484 if (Expr *Capturer = findCapturingExpr(*this, Init, Owner))
13485 diagnoseRetainCycle(*this, Capturer, Owner);
13486}
13487
13488static bool checkUnsafeAssignLiteral(Sema &S, SourceLocation Loc,
13489 Expr *RHS, bool isProperty) {
13490 // Check if RHS is an Objective-C object literal, which also can get
13491 // immediately zapped in a weak reference. Note that we explicitly
13492 // allow ObjCStringLiterals, since those are designed to never really die.
13493 RHS = RHS->IgnoreParenImpCasts();
13494
13495 // This enum needs to match with the 'select' in
13496 // warn_objc_arc_literal_assign (off-by-1).
13497 Sema::ObjCLiteralKind Kind = S.CheckLiteralKind(RHS);
13498 if (Kind == Sema::LK_String || Kind == Sema::LK_None)
13499 return false;
13500
13501 S.Diag(Loc, diag::warn_arc_literal_assign)
13502 << (unsigned) Kind
13503 << (isProperty ? 0 : 1)
13504 << RHS->getSourceRange();
13505
13506 return true;
13507}
13508
13509static bool checkUnsafeAssignObject(Sema &S, SourceLocation Loc,
13510 Qualifiers::ObjCLifetime LT,
13511 Expr *RHS, bool isProperty) {
13512 // Strip off any implicit cast added to get to the one ARC-specific.
13513 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) {
13514 if (cast->getCastKind() == CK_ARCConsumeObject) {
13515 S.Diag(Loc, diag::warn_arc_retained_assign)
13516 << (LT == Qualifiers::OCL_ExplicitNone)
13517 << (isProperty ? 0 : 1)
13518 << RHS->getSourceRange();
13519 return true;
13520 }
13521 RHS = cast->getSubExpr();
13522 }
13523
13524 if (LT == Qualifiers::OCL_Weak &&
13525 checkUnsafeAssignLiteral(S, Loc, RHS, isProperty))
13526 return true;
13527
13528 return false;
13529}
13530
13531bool Sema::checkUnsafeAssigns(SourceLocation Loc,
13532 QualType LHS, Expr *RHS) {
13533 Qualifiers::ObjCLifetime LT = LHS.getObjCLifetime();
13534
13535 if (LT != Qualifiers::OCL_Weak && LT != Qualifiers::OCL_ExplicitNone)
13536 return false;
13537
13538 if (checkUnsafeAssignObject(*this, Loc, LT, RHS, false))
13539 return true;
13540
13541 return false;
13542}
13543
/// Check an assignment expression LHS = RHS for ARC-related hazards:
/// retained values or literals flowing into weak/unsafe destinations,
/// including assignments through Objective-C properties.
void Sema::checkUnsafeExprAssigns(SourceLocation Loc,
                                  Expr *LHS, Expr *RHS) {
  QualType LHSType;
  // PropertyRef on LHS type need be directly obtained from
  // its declaration as it has a PseudoType.
  ObjCPropertyRefExpr *PRE
    = dyn_cast<ObjCPropertyRefExpr>(LHS->IgnoreParens());
  if (PRE && !PRE->isImplicitProperty()) {
    const ObjCPropertyDecl *PD = PRE->getExplicitProperty();
    if (PD)
      LHSType = PD->getType();
  }

  // Fall back to the expression's own type when the LHS is not an explicit
  // property reference.
  if (LHSType.isNull())
    LHSType = LHS->getType();

  Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime();

  // Assigning to a __weak l-value counts as a "safe" use for
  // -Warc-repeated-use-of-weak tracking.
  if (LT == Qualifiers::OCL_Weak) {
    if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc))
      getCurFunction()->markSafeWeakUse(LHS);
  }

  // If the general unsafe-assign check already diagnosed, we are done.
  if (checkUnsafeAssigns(Loc, LHSType, RHS))
    return;

  // FIXME. Check for other life times.
  if (LT != Qualifiers::OCL_None)
    return;

  // The remaining checks look at the property's declared attributes.
  if (PRE) {
    if (PRE->isImplicitProperty())
      return;
    const ObjCPropertyDecl *PD = PRE->getExplicitProperty();
    if (!PD)
      return;

    unsigned Attributes = PD->getPropertyAttributes();
    if (Attributes & ObjCPropertyDecl::OBJC_PR_assign) {
      // when 'assign' attribute was not explicitly specified
      // by user, ignore it and rely on property type itself
      // for lifetime info.
      unsigned AsWrittenAttr = PD->getPropertyAttributesAsWritten();
      if (!(AsWrittenAttr & ObjCPropertyDecl::OBJC_PR_assign) &&
          LHSType->isObjCRetainableType())
        return;

      // Strip implicit casts, warning when a retained (+1) value is stored
      // into an unretained 'assign' property.
      while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) {
        if (cast->getCastKind() == CK_ARCConsumeObject) {
          Diag(Loc, diag::warn_arc_retained_property_assign)
            << RHS->getSourceRange();
          return;
        }
        RHS = cast->getSubExpr();
      }
    }
    else if (Attributes & ObjCPropertyDecl::OBJC_PR_weak) {
      // Weak properties get the full unsafe-assign treatment.
      if (checkUnsafeAssignObject(*this, Loc, Qualifiers::OCL_Weak, RHS, true))
        return;
    }
  }
}
13606
13607//===--- CHECK: Empty statement body (-Wempty-body) ---------------------===//
13608
13609static bool ShouldDiagnoseEmptyStmtBody(const SourceManager &SourceMgr,
13610 SourceLocation StmtLoc,
13611 const NullStmt *Body) {
13612 // Do not warn if the body is a macro that expands to nothing, e.g:
13613 //
13614 // #define CALL(x)
13615 // if (condition)
13616 // CALL(0);
13617 if (Body->hasLeadingEmptyMacro())
13618 return false;
13619
13620 // Get line numbers of statement and body.
13621 bool StmtLineInvalid;
13622 unsigned StmtLine = SourceMgr.getPresumedLineNumber(StmtLoc,
13623 &StmtLineInvalid);
13624 if (StmtLineInvalid)
13625 return false;
13626
13627 bool BodyLineInvalid;
13628 unsigned BodyLine = SourceMgr.getSpellingLineNumber(Body->getSemiLoc(),
13629 &BodyLineInvalid);
13630 if (BodyLineInvalid)
13631 return false;
13632
13633 // Warn if null statement and body are on the same line.
13634 if (StmtLine != BodyLine)
13635 return false;
13636
13637 return true;
13638}
13639
13640void Sema::DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
13641 const Stmt *Body,
13642 unsigned DiagID) {
13643 // Since this is a syntactic check, don't emit diagnostic for template
13644 // instantiations, this just adds noise.
13645 if (CurrentInstantiationScope)
13646 return;
13647
13648 // The body should be a null statement.
13649 const NullStmt *NBody = dyn_cast<NullStmt>(Body);
13650 if (!NBody)
13651 return;
13652
13653 // Do the usual checks.
13654 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody))
13655 return;
13656
13657 Diag(NBody->getSemiLoc(), DiagID);
13658 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line);
13659}
13660
13661void Sema::DiagnoseEmptyLoopBody(const Stmt *S,
13662 const Stmt *PossibleBody) {
13663 assert(!CurrentInstantiationScope); // Ensured by caller
13664
13665 SourceLocation StmtLoc;
13666 const Stmt *Body;
13667 unsigned DiagID;
13668 if (const ForStmt *FS = dyn_cast<ForStmt>(S)) {
13669 StmtLoc = FS->getRParenLoc();
13670 Body = FS->getBody();
13671 DiagID = diag::warn_empty_for_body;
13672 } else if (const WhileStmt *WS = dyn_cast<WhileStmt>(S)) {
13673 StmtLoc = WS->getCond()->getSourceRange().getEnd();
13674 Body = WS->getBody();
13675 DiagID = diag::warn_empty_while_body;
13676 } else
13677 return; // Neither `for' nor `while'.
13678
13679 // The body should be a null statement.
13680 const NullStmt *NBody = dyn_cast<NullStmt>(Body);
13681 if (!NBody)
13682 return;
13683
13684 // Skip expensive checks if diagnostic is disabled.
13685 if (Diags.isIgnored(DiagID, NBody->getSemiLoc()))
13686 return;
13687
13688 // Do the usual checks.
13689 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody))
13690 return;
13691
13692 // `for(...);' and `while(...);' are popular idioms, so in order to keep
13693 // noise level low, emit diagnostics only if for/while is followed by a
13694 // CompoundStmt, e.g.:
13695 // for (int i = 0; i < n; i++);
13696 // {
13697 // a(i);
13698 // }
13699 // or if for/while is followed by a statement with more indentation
13700 // than for/while itself:
13701 // for (int i = 0; i < n; i++);
13702 // a(i);
13703 bool ProbableTypo = isa<CompoundStmt>(PossibleBody);
13704 if (!ProbableTypo) {
13705 bool BodyColInvalid;
13706 unsigned BodyCol = SourceMgr.getPresumedColumnNumber(
13707 PossibleBody->getBeginLoc(), &BodyColInvalid);
13708 if (BodyColInvalid)
13709 return;
13710
13711 bool StmtColInvalid;
13712 unsigned StmtCol =
13713 SourceMgr.getPresumedColumnNumber(S->getBeginLoc(), &StmtColInvalid);
13714 if (StmtColInvalid)
13715 return;
13716
13717 if (BodyCol > StmtCol)
13718 ProbableTypo = true;
13719 }
13720
13721 if (ProbableTypo) {
13722 Diag(NBody->getSemiLoc(), DiagID);
13723 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line);
13724 }
13725}
13726
13727//===--- CHECK: Warn on self move with std::move. -------------------------===//
13728
13729/// DiagnoseSelfMove - Emits a warning if a value is moved to itself.
13730void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
13731 SourceLocation OpLoc) {
13732 if (Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, OpLoc))
13733 return;
13734
13735 if (inTemplateInstantiation())
13736 return;
13737
13738 // Strip parens and casts away.
13739 LHSExpr = LHSExpr->IgnoreParenImpCasts();
13740 RHSExpr = RHSExpr->IgnoreParenImpCasts();
13741
13742 // Check for a call expression
13743 const CallExpr *CE = dyn_cast<CallExpr>(RHSExpr);
13744 if (!CE || CE->getNumArgs() != 1)
13745 return;
13746
13747 // Check for a call to std::move
13748 if (!CE->isCallToStdMove())
13749 return;
13750
13751 // Get argument from std::move
13752 RHSExpr = CE->getArg(0);
13753
13754 const DeclRefExpr *LHSDeclRef = dyn_cast<DeclRefExpr>(LHSExpr);
13755 const DeclRefExpr *RHSDeclRef = dyn_cast<DeclRefExpr>(RHSExpr);
13756
13757 // Two DeclRefExpr's, check that the decls are the same.
13758 if (LHSDeclRef && RHSDeclRef) {
13759 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl())
13760 return;
13761 if (LHSDeclRef->getDecl()->getCanonicalDecl() !=
13762 RHSDeclRef->getDecl()->getCanonicalDecl())
13763 return;
13764
13765 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
13766 << LHSExpr->getSourceRange()
13767 << RHSExpr->getSourceRange();
13768 return;
13769 }
13770
13771 // Member variables require a different approach to check for self moves.
13772 // MemberExpr's are the same if every nested MemberExpr refers to the same
13773 // Decl and that the base Expr's are DeclRefExpr's with the same Decl or
13774 // the base Expr's are CXXThisExpr's.
13775 const Expr *LHSBase = LHSExpr;
13776 const Expr *RHSBase = RHSExpr;
13777 const MemberExpr *LHSME = dyn_cast<MemberExpr>(LHSExpr);
13778 const MemberExpr *RHSME = dyn_cast<MemberExpr>(RHSExpr);
13779 if (!LHSME || !RHSME)
13780 return;
13781
13782 while (LHSME && RHSME) {
13783 if (LHSME->getMemberDecl()->getCanonicalDecl() !=
13784 RHSME->getMemberDecl()->getCanonicalDecl())
13785 return;
13786
13787 LHSBase = LHSME->getBase();
13788 RHSBase = RHSME->getBase();
13789 LHSME = dyn_cast<MemberExpr>(LHSBase);
13790 RHSME = dyn_cast<MemberExpr>(RHSBase);
13791 }
13792
13793 LHSDeclRef = dyn_cast<DeclRefExpr>(LHSBase);
13794 RHSDeclRef = dyn_cast<DeclRefExpr>(RHSBase);
13795 if (LHSDeclRef && RHSDeclRef) {
13796 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl())
13797 return;
13798 if (LHSDeclRef->getDecl()->getCanonicalDecl() !=
13799 RHSDeclRef->getDecl()->getCanonicalDecl())
13800 return;
13801
13802 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
13803 << LHSExpr->getSourceRange()
13804 << RHSExpr->getSourceRange();
13805 return;
13806 }
13807
13808 if (isa<CXXThisExpr>(LHSBase) && isa<CXXThisExpr>(RHSBase))
13809 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
13810 << LHSExpr->getSourceRange()
13811 << RHSExpr->getSourceRange();
13812}
13813
13814//===--- Layout compatibility ----------------------------------------------//
13815
13816static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2);
13817
13818/// Check if two enumeration types are layout-compatible.
13819static bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) {
13820 // C++11 [dcl.enum] p8:
13821 // Two enumeration types are layout-compatible if they have the same
13822 // underlying type.
13823 return ED1->isComplete() && ED2->isComplete() &&
13824 C.hasSameType(ED1->getIntegerType(), ED2->getIntegerType());
13825}
13826
13827/// Check if two fields are layout-compatible.
13828static bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1,
13829 FieldDecl *Field2) {
13830 if (!isLayoutCompatible(C, Field1->getType(), Field2->getType()))
13831 return false;
13832
13833 if (Field1->isBitField() != Field2->isBitField())
13834 return false;
13835
13836 if (Field1->isBitField()) {
13837 // Make sure that the bit-fields are the same length.
13838 unsigned Bits1 = Field1->getBitWidthValue(C);
13839 unsigned Bits2 = Field2->getBitWidthValue(C);
13840
13841 if (Bits1 != Bits2)
13842 return false;
13843 }
13844
13845 return true;
13846}
13847
13848/// Check if two standard-layout structs are layout-compatible.
13849/// (C++11 [class.mem] p17)
13850static bool isLayoutCompatibleStruct(ASTContext &C, RecordDecl *RD1,
13851 RecordDecl *RD2) {
13852 // If both records are C++ classes, check that base classes match.
13853 if (const CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(RD1)) {
13854 // If one of records is a CXXRecordDecl we are in C++ mode,
13855 // thus the other one is a CXXRecordDecl, too.
13856 const CXXRecordDecl *D2CXX = cast<CXXRecordDecl>(RD2);
13857 // Check number of base classes.
13858 if (D1CXX->getNumBases() != D2CXX->getNumBases())
13859 return false;
13860
13861 // Check the base classes.
13862 for (CXXRecordDecl::base_class_const_iterator
13863 Base1 = D1CXX->bases_begin(),
13864 BaseEnd1 = D1CXX->bases_end(),
13865 Base2 = D2CXX->bases_begin();
13866 Base1 != BaseEnd1;
13867 ++Base1, ++Base2) {
13868 if (!isLayoutCompatible(C, Base1->getType(), Base2->getType()))
13869 return false;
13870 }
13871 } else if (const CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(RD2)) {
13872 // If only RD2 is a C++ class, it should have zero base classes.
13873 if (D2CXX->getNumBases() > 0)
13874 return false;
13875 }
13876
13877 // Check the fields.
13878 RecordDecl::field_iterator Field2 = RD2->field_begin(),
13879 Field2End = RD2->field_end(),
13880 Field1 = RD1->field_begin(),
13881 Field1End = RD1->field_end();
13882 for ( ; Field1 != Field1End && Field2 != Field2End; ++Field1, ++Field2) {
13883 if (!isLayoutCompatible(C, *Field1, *Field2))
13884 return false;
13885 }
13886 if (Field1 != Field1End || Field2 != Field2End)
13887 return false;
13888
13889 return true;
13890}
13891
13892/// Check if two standard-layout unions are layout-compatible.
13893/// (C++11 [class.mem] p18)
13894static bool isLayoutCompatibleUnion(ASTContext &C, RecordDecl *RD1,
13895 RecordDecl *RD2) {
13896 llvm::SmallPtrSet<FieldDecl *, 8> UnmatchedFields;
13897 for (auto *Field2 : RD2->fields())
13898 UnmatchedFields.insert(Field2);
13899
13900 for (auto *Field1 : RD1->fields()) {
13901 llvm::SmallPtrSet<FieldDecl *, 8>::iterator
13902 I = UnmatchedFields.begin(),
13903 E = UnmatchedFields.end();
13904
13905 for ( ; I != E; ++I) {
13906 if (isLayoutCompatible(C, Field1, *I)) {
13907 bool Result = UnmatchedFields.erase(*I);
13908 (void) Result;
13909 assert(Result);
13910 break;
13911 }
13912 }
13913 if (I == E)
13914 return false;
13915 }
13916
13917 return UnmatchedFields.empty();
13918}
13919
13920static bool isLayoutCompatible(ASTContext &C, RecordDecl *RD1,
13921 RecordDecl *RD2) {
13922 if (RD1->isUnion() != RD2->isUnion())
13923 return false;
13924
13925 if (RD1->isUnion())
13926 return isLayoutCompatibleUnion(C, RD1, RD2);
13927 else
13928 return isLayoutCompatibleStruct(C, RD1, RD2);
13929}
13930
13931/// Check if two types are layout-compatible in C++11 sense.
13932static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) {
13933 if (T1.isNull() || T2.isNull())
13934 return false;
13935
13936 // C++11 [basic.types] p11:
13937 // If two types T1 and T2 are the same type, then T1 and T2 are
13938 // layout-compatible types.
13939 if (C.hasSameType(T1, T2))
13940 return true;
13941
13942 T1 = T1.getCanonicalType().getUnqualifiedType();
13943 T2 = T2.getCanonicalType().getUnqualifiedType();
13944
13945 const Type::TypeClass TC1 = T1->getTypeClass();
13946 const Type::TypeClass TC2 = T2->getTypeClass();
13947
13948 if (TC1 != TC2)
13949 return false;
13950
13951 if (TC1 == Type::Enum) {
13952 return isLayoutCompatible(C,
13953 cast<EnumType>(T1)->getDecl(),
13954 cast<EnumType>(T2)->getDecl());
13955 } else if (TC1 == Type::Record) {
13956 if (!T1->isStandardLayoutType() || !T2->isStandardLayoutType())
13957 return false;
13958
13959 return isLayoutCompatible(C,
13960 cast<RecordType>(T1)->getDecl(),
13961 cast<RecordType>(T2)->getDecl());
13962 }
13963
13964 return false;
13965}
13966
13967//===--- CHECK: pointer_with_type_tag attribute: datatypes should match ----//
13968
/// Given a type tag expression find the type tag itself.
///
/// \param TypeExpr Type tag expression, as it appears in user's code.
///
/// \param VD Declaration of an identifier that appears in a type tag.
///
/// \param MagicValue Type tag magic value.
///
/// \returns true on success; exactly one of *VD (identifier tag) or
/// *MagicValue (integer-literal tag) is set, the other is left untouched.
static bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx,
                            const ValueDecl **VD, uint64_t *MagicValue) {
  while(true) {
    if (!TypeExpr)
      return false;

    TypeExpr = TypeExpr->IgnoreParenImpCasts()->IgnoreParenCasts();

    switch (TypeExpr->getStmtClass()) {
    case Stmt::UnaryOperatorClass: {
      // Look through `&' and `*'.
      const UnaryOperator *UO = cast<UnaryOperator>(TypeExpr);
      if (UO->getOpcode() == UO_AddrOf || UO->getOpcode() == UO_Deref) {
        TypeExpr = UO->getSubExpr();
        continue;
      }
      return false;
    }

    case Stmt::DeclRefExprClass: {
      // A reference to a declaration: report the declaration itself.
      const DeclRefExpr *DRE = cast<DeclRefExpr>(TypeExpr);
      *VD = DRE->getDecl();
      return true;
    }

    case Stmt::IntegerLiteralClass: {
      // A literal magic value; only values that fit in 64 bits are usable.
      const IntegerLiteral *IL = cast<IntegerLiteral>(TypeExpr);
      llvm::APInt MagicValueAPInt = IL->getValue();
      if (MagicValueAPInt.getActiveBits() <= 64) {
        *MagicValue = MagicValueAPInt.getZExtValue();
        return true;
      } else
        return false;
    }

    case Stmt::BinaryConditionalOperatorClass:
    case Stmt::ConditionalOperatorClass: {
      // For `c ? a : b' with a compile-time-constant condition, follow the
      // selected arm.
      const AbstractConditionalOperator *ACO =
          cast<AbstractConditionalOperator>(TypeExpr);
      bool Result;
      if (ACO->getCond()->EvaluateAsBooleanCondition(Result, Ctx)) {
        if (Result)
          TypeExpr = ACO->getTrueExpr();
        else
          TypeExpr = ACO->getFalseExpr();
        continue;
      }
      return false;
    }

    case Stmt::BinaryOperatorClass: {
      // For a comma expression, the type tag is the right operand.
      const BinaryOperator *BO = cast<BinaryOperator>(TypeExpr);
      if (BO->getOpcode() == BO_Comma) {
        TypeExpr = BO->getRHS();
        continue;
      }
      return false;
    }

    default:
      return false;
    }
  }
}
14039
/// Retrieve the C type corresponding to type tag TypeExpr.
///
/// \param TypeExpr Expression that specifies a type tag.
///
/// \param MagicValues Registered magic values.
///
/// \param FoundWrongKind Set to true if a type tag was found, but of a wrong
/// kind.
///
/// \param TypeInfo Information about the corresponding C type.
///
/// \returns true if the corresponding C type was found.
static bool GetMatchingCType(
    const IdentifierInfo *ArgumentKind,
    const Expr *TypeExpr, const ASTContext &Ctx,
    const llvm::DenseMap<Sema::TypeTagMagicValue,
                         Sema::TypeTagData> *MagicValues,
    bool &FoundWrongKind,
    Sema::TypeTagData &TypeInfo) {
  FoundWrongKind = false;

  // Variable declaration that has type_tag_for_datatype attribute.
  const ValueDecl *VD = nullptr;

  // Note: left uninitialized on purpose; FindTypeTagExpr sets exactly one
  // of VD or MagicValue, and MagicValue is only read when VD is null.
  uint64_t MagicValue;

  if (!FindTypeTagExpr(TypeExpr, Ctx, &VD, &MagicValue))
    return false;

  if (VD) {
    // The tag was an identifier: the type data comes from its
    // type_tag_for_datatype attribute, whose kind must match ArgumentKind.
    if (TypeTagForDatatypeAttr *I = VD->getAttr<TypeTagForDatatypeAttr>()) {
      if (I->getArgumentKind() != ArgumentKind) {
        FoundWrongKind = true;
        return false;
      }
      TypeInfo.Type = I->getMatchingCType();
      TypeInfo.LayoutCompatible = I->getLayoutCompatible();
      TypeInfo.MustBeNull = I->getMustBeNull();
      return true;
    }
    return false;
  }

  // The tag was an integer literal: look it up among the magic values
  // registered via RegisterTypeTagForDatatype.
  if (!MagicValues)
    return false;

  llvm::DenseMap<Sema::TypeTagMagicValue,
                 Sema::TypeTagData>::const_iterator I =
      MagicValues->find(std::make_pair(ArgumentKind, MagicValue));
  if (I == MagicValues->end())
    return false;

  TypeInfo = I->second;
  return true;
}
14095
14096void Sema::RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
14097 uint64_t MagicValue, QualType Type,
14098 bool LayoutCompatible,
14099 bool MustBeNull) {
14100 if (!TypeTagForDatatypeMagicValues)
14101 TypeTagForDatatypeMagicValues.reset(
14102 new llvm::DenseMap<TypeTagMagicValue, TypeTagData>);
14103
14104 TypeTagMagicValue Magic(ArgumentKind, MagicValue);
14105 (*TypeTagForDatatypeMagicValues)[Magic] =
14106 TypeTagData(Type, LayoutCompatible, MustBeNull);
14107}
14108
14109static bool IsSameCharType(QualType T1, QualType T2) {
14110 const BuiltinType *BT1 = T1->getAs<BuiltinType>();
14111 if (!BT1)
14112 return false;
14113
14114 const BuiltinType *BT2 = T2->getAs<BuiltinType>();
14115 if (!BT2)
14116 return false;
14117
14118 BuiltinType::Kind T1Kind = BT1->getKind();
14119 BuiltinType::Kind T2Kind = BT2->getKind();
14120
14121 return (T1Kind == BuiltinType::SChar && T2Kind == BuiltinType::Char_S) ||
14122 (T1Kind == BuiltinType::UChar && T2Kind == BuiltinType::Char_U) ||
14123 (T1Kind == BuiltinType::Char_U && T2Kind == BuiltinType::UChar) ||
14124 (T1Kind == BuiltinType::Char_S && T2Kind == BuiltinType::SChar);
14125}
14126
/// Check a call against an argument_with_type_tag attribute: the argument
/// at the attribute's arg_idx must have the C type (or a layout-compatible
/// one) registered for the type tag found at type_tag_idx.
void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
                                    const ArrayRef<const Expr *> ExprArgs,
                                    SourceLocation CallSiteLoc) {
  const IdentifierInfo *ArgumentKind = Attr->getArgumentKind();
  bool IsPointerAttr = Attr->getIsPointer();

  // Retrieve the argument representing the 'type_tag'.
  unsigned TypeTagIdxAST = Attr->getTypeTagIdx().getASTIndex();
  if (TypeTagIdxAST >= ExprArgs.size()) {
    Diag(CallSiteLoc, diag::err_tag_index_out_of_range)
        << 0 << Attr->getTypeTagIdx().getSourceIndex();
    return;
  }
  const Expr *TypeTagExpr = ExprArgs[TypeTagIdxAST];
  bool FoundWrongKind;
  TypeTagData TypeInfo;
  // Resolve the tag to its registered C type; an unknown tag is silently
  // ignored, a tag of the wrong kind gets its own warning.
  if (!GetMatchingCType(ArgumentKind, TypeTagExpr, Context,
                        TypeTagForDatatypeMagicValues.get(),
                        FoundWrongKind, TypeInfo)) {
    if (FoundWrongKind)
      Diag(TypeTagExpr->getExprLoc(),
           diag::warn_type_tag_for_datatype_wrong_kind)
          << TypeTagExpr->getSourceRange();
    return;
  }

  // Retrieve the argument representing the 'arg_idx'.
  unsigned ArgumentIdxAST = Attr->getArgumentIdx().getASTIndex();
  if (ArgumentIdxAST >= ExprArgs.size()) {
    Diag(CallSiteLoc, diag::err_tag_index_out_of_range)
        << 1 << Attr->getArgumentIdx().getSourceIndex();
    return;
  }
  const Expr *ArgumentExpr = ExprArgs[ArgumentIdxAST];
  if (IsPointerAttr) {
    // Skip implicit cast of pointer to `void *' (as a function argument).
    if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgumentExpr))
      if (ICE->getType()->isVoidPointerType() &&
          ICE->getCastKind() == CK_BitCast)
        ArgumentExpr = ICE->getSubExpr();
  }
  QualType ArgumentType = ArgumentExpr->getType();

  // Passing a `void*' pointer shouldn't trigger a warning.
  if (IsPointerAttr && ArgumentType->isVoidPointerType())
    return;

  if (TypeInfo.MustBeNull) {
    // Type tag with matching void type requires a null pointer.
    if (!ArgumentExpr->isNullPointerConstant(Context,
                                             Expr::NPC_ValueDependentIsNotNull)) {
      Diag(ArgumentExpr->getExprLoc(),
           diag::warn_type_safety_null_pointer_required)
          << ArgumentKind->getName()
          << ArgumentExpr->getSourceRange()
          << TypeTagExpr->getSourceRange();
    }
    return;
  }

  // For pointer attributes, the registered type describes the pointee.
  QualType RequiredType = TypeInfo.Type;
  if (IsPointerAttr)
    RequiredType = Context.getPointerType(RequiredType);

  bool mismatch = false;
  if (!TypeInfo.LayoutCompatible) {
    mismatch = !Context.hasSameType(ArgumentType, RequiredType);

    // C++11 [basic.fundamental] p1:
    // Plain char, signed char, and unsigned char are three distinct types.
    //
    // But we treat plain `char' as equivalent to `signed char' or `unsigned
    // char' depending on the current char signedness mode.
    if (mismatch)
      if ((IsPointerAttr && IsSameCharType(ArgumentType->getPointeeType(),
                                           RequiredType->getPointeeType())) ||
          (!IsPointerAttr && IsSameCharType(ArgumentType, RequiredType)))
        mismatch = false;
  } else
    // Layout-compatible tags compare layouts instead of identity.
    if (IsPointerAttr)
      mismatch = !isLayoutCompatible(Context,
                                     ArgumentType->getPointeeType(),
                                     RequiredType->getPointeeType());
    else
      mismatch = !isLayoutCompatible(Context, ArgumentType, RequiredType);

  if (mismatch)
    Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_type_mismatch)
        << ArgumentType << ArgumentKind
        << TypeInfo.LayoutCompatible << RequiredType
        << ArgumentExpr->getSourceRange()
        << TypeTagExpr->getSourceRange();
}
14220
/// Record a candidate "address of packed member taken" for later diagnosis;
/// the entry may still be discarded by DiscardMisalignedMemberAddress() and
/// is reported by DiagnoseMisalignedMembers().
void Sema::AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
                                         CharUnits Alignment) {
  MisalignedMembers.emplace_back(E, RD, MD, Alignment);
}
14225
14226void Sema::DiagnoseMisalignedMembers() {
14227 for (MisalignedMember &m : MisalignedMembers) {
14228 const NamedDecl *ND = m.RD;
14229 if (ND->getName().empty()) {
14230 if (const TypedefNameDecl *TD = m.RD->getTypedefNameForAnonDecl())
14231 ND = TD;
14232 }
14233 Diag(m.E->getBeginLoc(), diag::warn_taking_address_of_packed_member)
14234 << m.MD << ND << m.E->getSourceRange();
14235 }
14236 MisalignedMembers.clear();
14237}
14238
14239void Sema::DiscardMisalignedMemberAddress(const Type *T, Expr *E) {
14240 E = E->IgnoreParens();
14241 if (!T->isPointerType() && !T->isIntegerType())
14242 return;
14243 if (isa<UnaryOperator>(E) &&
14244 cast<UnaryOperator>(E)->getOpcode() == UO_AddrOf) {
14245 auto *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens();
14246 if (isa<MemberExpr>(Op)) {
14247 auto MA = llvm::find(MisalignedMembers, MisalignedMember(Op));
14248 if (MA != MisalignedMembers.end() &&
14249 (T->isIntegerType() ||
14250 (T->isPointerType() && (T->getPointeeType()->isIncompleteType() ||
14251 Context.getTypeAlignInChars(
14252 T->getPointeeType()) <= MA->Alignment))))
14253 MisalignedMembers.erase(MA);
14254 }
14255 }
14256}
14257
/// Walk a (possibly nested) member access expression and invoke Action
/// when the access's effective alignment — after accounting for packed
/// attributes along the chain — is lower than the alignment its type
/// requires.
void Sema::RefersToMemberWithReducedAlignment(
    Expr *E,
    llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
        Action) {
  const auto *ME = dyn_cast<MemberExpr>(E);
  if (!ME)
    return;

  // No need to check expressions with an __unaligned-qualified type.
  if (E->getType().getQualifiers().hasUnaligned())
    return;

  // For a chain of MemberExpr like "a.b.c.d" this list
  // will keep FieldDecl's like [d, c, b].
  SmallVector<FieldDecl *, 4> ReverseMemberChain;
  const MemberExpr *TopME = nullptr;
  bool AnyIsPacked = false;
  do {
    QualType BaseType = ME->getBase()->getType();
    if (ME->isArrow())
      BaseType = BaseType->getPointeeType();
    // NOTE(review): getAs<RecordType>() is assumed non-null here — a member
    // access base should always be of record type, but this would crash on
    // a null result; confirm no dependent/invalid bases reach this point.
    RecordDecl *RD = BaseType->getAs<RecordType>()->getDecl();
    if (RD->isInvalidDecl())
      return;

    ValueDecl *MD = ME->getMemberDecl();
    auto *FD = dyn_cast<FieldDecl>(MD);
    // We do not care about non-data members.
    if (!FD || FD->isInvalidDecl())
      return;

    // Remember whether any record or member along the chain is packed.
    AnyIsPacked =
        AnyIsPacked || (RD->hasAttr<PackedAttr>() || MD->hasAttr<PackedAttr>());
    ReverseMemberChain.push_back(FD);

    TopME = ME;
    ME = dyn_cast<MemberExpr>(ME->getBase()->IgnoreParens());
  } while (ME);
  assert(TopME && "We did not compute a topmost MemberExpr!");

  // Not the scope of this diagnostic.
  if (!AnyIsPacked)
    return;

  const Expr *TopBase = TopME->getBase()->IgnoreParenImpCasts();
  const auto *DRE = dyn_cast<DeclRefExpr>(TopBase);
  // TODO: The innermost base of the member expression may be too complicated.
  // For now, just disregard these cases. This is left for future
  // improvement.
  if (!DRE && !isa<CXXThisExpr>(TopBase))
    return;

  // Alignment expected by the whole expression.
  CharUnits ExpectedAlignment = Context.getTypeAlignInChars(E->getType());

  // No need to do anything else with this case.
  if (ExpectedAlignment.isOne())
    return;

  // Synthesize offset of the whole access by summing the field offsets
  // from the outermost record inward.
  CharUnits Offset;
  for (auto I = ReverseMemberChain.rbegin(); I != ReverseMemberChain.rend();
       I++) {
    Offset += Context.toCharUnitsFromBits(Context.getFieldOffset(*I));
  }

  // Compute the CompleteObjectAlignment as the alignment of the whole chain.
  CharUnits CompleteObjectAlignment = Context.getTypeAlignInChars(
      ReverseMemberChain.back()->getParent()->getTypeForDecl());

  // The base expression of the innermost MemberExpr may give
  // stronger guarantees than the class containing the member.
  if (DRE && !TopME->isArrow()) {
    const ValueDecl *VD = DRE->getDecl();
    if (!VD->getType()->isReferenceType())
      CompleteObjectAlignment =
          std::max(CompleteObjectAlignment, Context.getDeclAlign(VD));
  }

  // Check if the synthesized offset fulfills the alignment.
  if (Offset % ExpectedAlignment != 0 ||
      // It may fulfill the offset it but the effective alignment may still be
      // lower than the expected expression alignment.
      CompleteObjectAlignment < ExpectedAlignment) {
    // If this happens, we want to determine a sensible culprit of this.
    // Intuitively, watching the chain of member expressions from right to
    // left, we start with the required alignment (as required by the field
    // type) but some packed attribute in that chain has reduced the alignment.
    // It may happen that another packed structure increases it again. But if
    // we are here such increase has not been enough. So pointing the first
    // FieldDecl that either is packed or else its RecordDecl is,
    // seems reasonable.
    FieldDecl *FD = nullptr;
    CharUnits Alignment;
    for (FieldDecl *FDI : ReverseMemberChain) {
      if (FDI->hasAttr<PackedAttr>() ||
          FDI->getParent()->hasAttr<PackedAttr>()) {
        FD = FDI;
        Alignment = std::min(
            Context.getTypeAlignInChars(FD->getType()),
            Context.getTypeAlignInChars(FD->getParent()->getTypeForDecl()));
        break;
      }
    }
    assert(FD && "We did not find a packed FieldDecl!");
    Action(E, FD->getParent(), FD, Alignment);
  }
}
14366
14367void Sema::CheckAddressOfPackedMember(Expr *rhs) {
14368 using namespace std::placeholders;
14369
14370 RefersToMemberWithReducedAlignment(
14371 rhs, std::bind(&Sema::AddPotentialMisalignedMembers, std::ref(*this), _1,
14372 _2, _3, _4));
14373}
14374